/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
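
/*
 * As a rough sketch, the request-path code further down builds such a
 * job descriptor against an already-mapped shared descriptor with the
 * desc_constr.h helpers, along these lines:
 *
 *	len = desc_len(sh_desc);
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	append_seq_in_ptr(desc, src_dma, in_len, in_options);
 *	append_seq_out_ptr(desc, dst_dma, out_len, out_options);
 *
 * Only the sequence pointers and lengths change per request; the keys
 * and operation commands stay in the shared descriptor.
 */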

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

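/*
 * Worst-case descriptor size: the longest shared descriptor (givencrypt)
 * plus a fully inlined key.  The sh_desc_* buffers in struct caam_ctx
 * below are sized from this.
 */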
#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
static struct list_head alg_list;

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

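	/*
	 * if/else on the shared-descriptor condition: when SHRD is set,
	 * jump to the DK (Decrypt Key) form of the operation below;
	 * otherwise run the plain decrypt operation and skip the DK one.
	 */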
	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | ivsize);
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
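	/*
	 * VARSEQINLEN/VARSEQOUTLEN are both set to the full sequence input
	 * length (REG0 is zero), so the variable-length FIFO LOAD and STORE
	 * below move the whole payload from the input sequence (req->src)
	 * to the output sequence (req->dst).
	 */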
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * Set when all data are contiguous: for src this means assoc + iv +
 * payload back to back; for dst, iv + payload only
 */
#define GIV_SRC_CONTIG		1
#define GIV_DST_CONTIG		(1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};

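/*
 * Layout of ctx->key for AEAD algorithms: the MDHA split key occupies the
 * first split_key_pad_len bytes and the class 1 encryption key follows
 * immediately after it.  append_key_aead() loads both halves, either
 * inlined in the shared descriptor or by reference to key_dma.
 */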
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline)
{
	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline);

	set_jump_tgt_here(desc, key_jump_cmd);
}

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/*
	 * NULL encryption; IV is zero
	 * assoclen = (assoclen + cryptlen) - cryptlen
	 */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Prepare to read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct aead_tfm *tfm = &aead->base.crt_aead;
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 geniv, moveiv;
	u32 *desc;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen + cryptlen = seqinlen - ivsize */
	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);
	aead_append_ld_iv(desc, tfm->ivsize);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
				ctx->authsize + tfm->ivsize);
	/* assoclen = (assoclen + cryptlen) - cryptlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	aead_append_ld_iv(desc, tfm->ivsize);

	append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

	/*
	 * Generate IV: load a hand-built info-FIFO entry that asks the
	 * pad/RNG source for ivsize random bytes, then (with the automatic
	 * info FIFO disabled) MOVE them from the input FIFO into the
	 * class 1 context register.
	 */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy IV from class 1 context to the output FIFO */
	append_move(desc, MOVE_SRC_CLASS1CTX |
		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* assoclen = seqinlen - (ivsize + cryptlen) */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy IV from class 1 ctx to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload the IV */
	append_seq_fifo_load(desc, tfm->ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
				    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			       ctx->split_key_pad_len, key_in, authkeylen,
			       ctx->alg_op);
}

static int aead_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

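	/*
	 * The split key generated further down holds the hashed HMAC ipad
	 * and opad states, so its length is twice the internal state size
	 * of the selected hash, padded up to a 16-byte multiple.
	 */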
	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* append the encryption key after the auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | tfm->ivsize);

	/* Choose operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	bool assoc_chained;
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	bool src_chained;
	int dst_nents;
	bool dst_chained;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       bool src_chained, int dst_nents, bool dst_chained,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
				     src_chained);
		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
				     dst_chained);
	} else {
		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
				     DMA_BIDIRECTIONAL, src_chained);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
			     DMA_TO_DEVICE, edesc->assoc_chained);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
		   edesc->dst_chained, edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen , 1);
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->cryptlen +
		       ctx->authsize + 4, 1);
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;
#ifdef DEBUG
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct aead_edesc *)((char *)desc -
		 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
		       req->cryptlen - ctx->authsize, 1);
#endif

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
		       sizeof(struct iphdr) + req->assoclen +
		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
		       ctx->authsize + 36, 1);
	if (!err && edesc->sec4_sg_bytes) {
		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
			sg->length + ctx->authsize + 16, 1);
	}
#endif

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
			  struct aead_edesc *edesc,
			  struct aead_request *req,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen , 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
		       edesc->src_nents ? 100 : ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
			edesc->src_nents ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
				 (edesc->src_nents ? : 1);
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (likely(req->src == req->dst)) {
		if (all_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  ((edesc->assoc_nents ? : 1) + 1);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	if (encrypt)
		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
				   out_options);
}

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
			      struct aead_edesc *edesc,
			      struct aead_request *req,
			      int contig)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ivsize = crypto_aead_ivsize(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	debug("assoclen %d cryptlen %d authsize %d\n",
	      req->assoclen, req->cryptlen, authsize);
	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
		       req->assoclen , 1);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
		       desc_bytes(sh_desc), 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (contig & GIV_SRC_CONTIG) {
		src_dma = sg_dma_address(req->assoc);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
			  in_options);

	if (contig & GIV_DST_CONTIG) {
		dst_dma = edesc->iv_dma;
	} else {
		if (likely(req->src == req->dst)) {
			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
				  edesc->assoc_nents;
			out_options = LDST_SGF;
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
			   out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	bool all_contig = true;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int ivsize = crypto_aead_ivsize(aead);
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
		dst_nents = sg_count(req->dst,
				     req->cryptlen +
					(encrypt ? authsize : (-authsize)),
				     &dst_chained);
	} else {
		src_nents = sg_count(req->src,
				     req->cryptlen +
					(encrypt ? authsize : 0),
				     &src_chained);
	}

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if data are contiguous: the input is contiguous when assoc
	 * is a single segment immediately followed by the IV, which is in
	 * turn immediately followed by a single-segment src payload.
	 */
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize !=
	    sg_dma_address(req->src)) {
		all_contig = false;
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len = assoc_nents + 1 + src_nents;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg(req->assoc,
			      (assoc_nents ? : 1),
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents ? : 1;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
				   iv_dma, ivsize, 0);
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src,
				   (src_nents ? : 1),
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents ? : 1;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	return edesc;
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
		      all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
				 CAAM_CMD_SZ, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_job(ctx->sh_desc_dec,
		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the aead extended descriptor for aead givencrypt
 */
static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
					       *greq, int desc_bytes,
					       u32 *contig_ptr)
{
	struct aead_request *req = &greq->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int assoc_nents, src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t iv_dma = 0;
	int sgc;
	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
	int ivsize = crypto_aead_ivsize(aead);
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

	if (unlikely(req->dst != req->src))
		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
				     &dst_chained);

	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
				 DMA_TO_DEVICE, assoc_chained);
	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/* Check if data are contiguous */
	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
		contig &= ~GIV_SRC_CONTIG;
	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
		contig &= ~GIV_DST_CONTIG;
	if (unlikely(req->src != req->dst)) {
		dst_nents = dst_nents ? : 1;
		sec4_sg_len += 1;
	}
	if (!(contig & GIV_SRC_CONTIG)) {
		assoc_nents = assoc_nents ? : 1;
		src_nents = src_nents ? : 1;
		sec4_sg_len += assoc_nents + 1 + src_nents;
		if (likely(req->src == req->dst))
			contig &= ~GIV_DST_CONTIG;
	}
	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
Y
Yuan Kang 已提交
1549
	edesc->assoc_chained = assoc_chained;
1550
	edesc->src_nents = src_nents;
Y
Yuan Kang 已提交
1551
	edesc->src_chained = src_chained;
1552
	edesc->dst_nents = dst_nents;
Y
Yuan Kang 已提交
1553
	edesc->dst_chained = dst_chained;
1554
	edesc->iv_dma = iv_dma;
Y
Yuan Kang 已提交
1555 1556 1557
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*contig_ptr = contig;

	sec4_sg_index = 0;
1561
	if (!(contig & GIV_SRC_CONTIG)) {
Y
Yuan Kang 已提交
1562 1563 1564 1565 1566
		sg_to_sec4_sg(req->assoc, assoc_nents,
			      edesc->sec4_sg +
			      sec4_sg_index, 0);
		sec4_sg_index += assoc_nents;
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1567
				   iv_dma, ivsize, 0);
Y
Yuan Kang 已提交
1568 1569 1570 1571 1572
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg +
				   sec4_sg_index, 0);
		sec4_sg_index += src_nents;
1573 1574
	}
	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
Y
Yuan Kang 已提交
1575
		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1576
				   iv_dma, ivsize, 0);
Y
Yuan Kang 已提交
1577 1578 1579
		sec4_sg_index += 1;
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
1580
	}
1581 1582
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
1583 1584 1585 1586
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}
1587 1588

	return edesc;
1589 1590
}
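
/*
 * Layout note (illustrative only, not used by the code above): in the worst
 * case handled by aead_giv_edesc_alloc() -- neither side contiguous and
 * req->src != req->dst -- the link table ends up filled as
 *
 *	sec4_sg[0 .. assoc_nents-1]	assoc scatterlist entries
 *	sec4_sg[assoc_nents]		IV to be generated into greq->giv
 *	sec4_sg[.. + src_nents]		src entries, last one marked final
 *	sec4_sg[..]			IV again, then dst entries (final)
 *
 * which is what the "+ 1" terms added to sec4_sg_len above account for.
 */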

static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
	struct aead_request *req = &areq->areq;
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
				     CAAM_CMD_SZ, &contig);

	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
		       req->cryptlen, 1);
#endif

	/* Create and submit job descriptor*/
	init_aead_giv_job(ctx->sh_desc_givenc,
			  ctx->sh_desc_givenc_dma, edesc, req, contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
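
/*
 * Usage sketch (not part of this driver): a caller such as the IPsec ESP
 * layer reaches aead_givencrypt() through the generic givcrypt API. The
 * variable names below are made up and error handling is omitted; the
 * aead_givcrypt_* setters and crypto_aead_givencrypt() are assumed to be
 * the stock crypto API helpers of this kernel generation.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_givcrypt_request *greq;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 12);
 *	greq = kmalloc(sizeof(*greq) + crypto_aead_reqsize(tfm), GFP_KERNEL);
 *	aead_givcrypt_set_tfm(greq, tfm);
 *	aead_givcrypt_set_callback(greq, 0, my_done, my_ctx);
 *	aead_givcrypt_set_assoc(greq, assoc_sg, assoclen);
 *	aead_givcrypt_set_crypt(greq, src_sg, dst_sg, cryptlen, iv);
 *	aead_givcrypt_set_giv(greq, generated_iv, seqno);
 *	err = crypto_aead_givencrypt(greq);	(usually -EINPROGRESS here)
 */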

static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
	return aead_encrypt(&areq->areq);
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
			edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
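
/*
 * Layout note (illustrative only): when the IV cannot sit contiguously in
 * front of the source buffer, ablkcipher_edesc_alloc() above builds the
 * link table as
 *
 *	sec4_sg[0]			IV (req->info, ivsize bytes)
 *	sec4_sg[1 .. src_nents]		src scatterlist, last entry final
 *	sec4_sg[...]			dst scatterlist (req->dst != req->src)
 *
 * and the job descriptor's sequence pointers then reference this table
 * instead of the data buffers directly.
 */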

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
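
/*
 * Usage sketch (not part of this driver): the encrypt/decrypt hooks above
 * are invoked through the generic ablkcipher API. Variable names are made
 * up and error handling is omitted; the crypto_ablkcipher_* and
 * ablkcipher_request_* calls are the standard API of this kernel
 * generation.
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, 0, my_done, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);	(usually -EINPROGRESS here)
 */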

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},

	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};
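
/*
 * Hypothetical example (nothing below is registered): supporting another
 * algorithm/mode the hardware can run means adding one more entry of the
 * same shape to driver_algs[] above. The XYZ_* names and the "xyz" cipher
 * are placeholders, not real definitions:
 *
 *	{
 *		.name = "cbc(xyz)",
 *		.driver_name = "cbc-xyz-caam",
 *		.blocksize = XYZ_BLOCK_SIZE,
 *		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 *		.template_ablkcipher = {
 *			.setkey = ablkcipher_setkey,
 *			.encrypt = ablkcipher_encrypt,
 *			.decrypt = ablkcipher_decrypt,
 *			.geniv = "eseqiv",
 *			.min_keysize = XYZ_KEY_SIZE,
 *			.max_keysize = XYZ_KEY_SIZE,
 *			.ivsize = XYZ_BLOCK_SIZE,
 *			},
 *		.class1_alg_type = OP_ALG_ALGSEL_XYZ | OP_ALG_AAI_CBC,
 *	},
 *
 * caam_algapi_init() below walks driver_algs[] and registers each entry,
 * so no other change would be needed.
 */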

struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_exit(void)
{

	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;


	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");