/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
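
/*
 * A minimal sketch (not taken from a real request) of how a job descriptor
 * with the layout above is assembled using the desc_constr.h helpers this
 * file relies on; sh_desc_dma, src_dma/dst_dma and the lengths below are
 * illustrative placeholders:
 *
 *	u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];
 *	int len = desc_len(sh_desc);		// shared descriptor length
 *
 *	// Header plus pointer to the shared descriptor (deferred, reversed)
 *	init_job_desc_shared(desc, sh_desc_dma, len,
 *			     HDR_SHARE_DEFER | HDR_REVERSE);
 *	// SEQ_OUT_PTR: where the output goes
 *	append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *	// SEQ_IN_PTR: where the input comes from
 *	append_seq_in_ptr(desc, src_dma, in_len, 0);
 */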

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif

static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	dma_addr_t key_dma;
	struct device *jrdev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};

static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
			ctx->adata.keylen_pad;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
		ctx->adata.key_inline = true;
		ctx->adata.key_virt = ctx->key;
	} else {
		ctx->adata.key_inline = false;
		ctx->adata.key_dma = ctx->key_dma;
	}

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}
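
/*
 * The inline-vs-referenced key decision above repeats for every algorithm
 * in this file. A condensed sketch of the pattern (the helper itself is
 * hypothetical; the field names are the ones used here):
 *
 *	static void key_placement(struct alginfo *adata, const u8 *key,
 *				  dma_addr_t key_dma, int rem_bytes,
 *				  int desc_len_needed)
 *	{
 *		if (rem_bytes >= desc_len_needed) {
 *			adata->key_inline = true;	// key copied into desc
 *			adata->key_virt = key;
 *		} else {
 *			adata->key_inline = false;	// desc holds a pointer
 *			adata->key_dma = key_dma;
 *		}
 *	}
 */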

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 ctx1_iv_off = 0;
	u32 *desc, *nonce = NULL;
	u32 inl_mask;
	unsigned int data_len[2];
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->cdata.keylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
			       is_rfc3686, nonce, ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, alg->caam.geniv, is_rfc3686,
			       nonce, ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	if (inl_mask & 1)
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
				  ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

skip_givenc:
	return 0;
}
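
/*
 * desc_inline_query() above answers, for each data_len[] entry, whether
 * that key still fits inline once the shared descriptor body and the job
 * I/O commands are accounted for, reporting the answer as a bit mask.
 * A hedged usage sketch (the sizes are illustrative, not from a real
 * session):
 *
 *	unsigned int data_len[2] = { 64, 32 };	// padded split key, AES key
 *	u32 inl_mask;
 *
 *	if (desc_inline_query(DESC_AEAD_ENC_LEN, AUTHENC_DESC_JOB_IO_LEN,
 *			      data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0)
 *		return -EINVAL;
 *	// bit 0 set: inline the auth key; bit 1 set: inline the cipher key
 */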

static int aead_setauthsize(struct crypto_aead *authenc,
				    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_enc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	desc = ctx->sh_desc_dec;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead,
			       const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* append encryption key after the auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, DMA_TO_DEVICE);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
	ctx->cdata.keylen = keys.enckeylen;
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
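
/*
 * Resulting ctx->key layout after aead_setkey(), as implied by the offsets
 * used in aead_set_sh_desc():
 *
 *	ctx->key:  | MDHA split key, padded to adata.keylen_pad | enc key |
 *	           ^                                            ^
 *	           ctx->key / ctx->key_dma                      + adata.keylen_pad
 *
 * For rfc3686 the nonce is the tail of the enckey material, which is where
 * aead_set_sh_desc() picks it up (keylen_pad + cdata.keylen -
 * CTR_RFC3686_NONCE_SIZE).
 */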

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   DMA_TO_DEVICE);
	return rfc4106_set_sh_desc(aead);
}
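
/*
 * Worked example for the split above: a 20-byte rfc4106 key yields a
 * 16-byte AES key (ctx->cdata.keylen = 20 - 4) followed by the 4-byte
 * salt, which init_gcm_job() later appends to the descriptor from
 * ctx->key + ctx->cdata.keylen.
 */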

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
				   DMA_TO_DEVICE);
	return rfc4543_set_sh_desc(aead);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

	memcpy(ctx->key, key, keylen);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				     ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;
	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
					ctx1_iv_off);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}

static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = ctx->key;
	ctx->cdata.key_inline = true;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;
	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
				   desc_bytes(desc), DMA_TO_DEVICE);

	return 0;
}
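
/*
 * Worked example for the check above: XTS takes two equal-size AES keys
 * concatenated, so the accepted lengths are 2 * 16 = 32 bytes (XTS-AES-128)
 * and 2 * 32 = 64 bytes (XTS-AES-256); a 48-byte (2 * 24) key is rejected
 * since XTS is not defined for AES-192 here.
 */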

/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};
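
/*
 * The extended descriptor is carved out of a single kzalloc() in
 * aead_edesc_alloc(), so its pieces are contiguous in memory:
 *
 *	| struct aead_edesc | hw_desc[] (desc_bytes) | sec4_sg[] table |
 *
 * which is why sec4_sg is set to
 * (void *)edesc + sizeof(struct aead_edesc) + desc_bytes at allocation.
 */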

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input s/w scatterlist
 * @dst_nents: number of segments in output s/w scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify the h/w ICV (auth) check passed, else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				   void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
#ifdef DEBUG
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

	ablkcipher_unmap(jrdev, edesc, req);
	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);

	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}

static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG: this should not be specific to generic GCM */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}
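
/*
 * The FIFO LOAD built above packs the command, class, type and length
 * fields into one 32-bit word, with the low bits carrying the byte count
 * (12 for the GCM IV) and the IV bytes following as immediate data. A
 * sketch mirroring the construction above, not an alternative encoding:
 *
 *	u32 cmd = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
 *		  FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last;
 *	append_cmd(desc, cmd);
 *	append_data(desc, req->iv, ivsize);	// immediate IV payload
 */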

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}

Y
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	pr_err("asked=%d, nbytes%d\n",
	       (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

	if (likely(req->src == req->dst)) {
		if (edesc->src_nents == 1 && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (edesc->dst_nents == 1) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}

/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (edesc->src_nents == 1) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen);
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
					     req->cryptlen +
						(encrypt ? authsize :
							   (-authsize)));
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : (-authsize)));
			return ERR_PTR(dst_nents);
		}
	} else {
		src_nents = sg_nents_for_len(req->src, req->assoclen +
					     req->cryptlen +
					     (encrypt ? authsize : 0));
		if (unlikely(src_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
				req->assoclen + req->cryptlen +
				(encrypt ? authsize : 0));
			return ERR_PTR(src_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* Cover also the case of null (zero length) input data */
		if (src_nents) {
			mapped_src_nents = dma_map_sg(jrdev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(jrdev, "unable to map source\n");
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
	sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = !(mapped_src_nents > 1);

	sec4_sg_index = 0;
	if (mapped_src_nents > 1) {
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += mapped_src_nents;
	}
	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
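
/*
 * Resulting sec4_sg table layout for the multi-segment case (a side with
 * a single mapped segment contributes no entries, per the sec4_sg_len
 * computation above):
 *
 *	edesc->sec4_sg:  | src entries, last flagged | dst entries, last flagged |
 *	                 ^
 *	                 sec4_sg_dma; dst entries begin at index mapped_src_nents
 */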

static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool in_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (req->dst != req->src) {
		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;
		sec4_sg_ents = 0;
	} else {
		in_contig = false;
		sec4_sg_ents = 1 + mapped_src_nents;
	}
	dst_sg_idx = sec4_sg_ents;
	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	if (!in_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, mapped_src_nents,
				   edesc->sec4_sg + 1, 0);
	}

	if (mapped_dst_nents > 1) {
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = in_contig;
	return edesc;
}

static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

/*
 * allocate and map the ablkcipher extended descriptor
 * for ablkcipher givencrypt
 */
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
				struct skcipher_givcrypt_request *greq,
				int desc_bytes,
				bool *iv_contig_out)
{
	struct ablkcipher_request *req = &greq->creq;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool out_contig;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (unlikely(src_nents < 0)) {
		dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
			req->nbytes);
		return ERR_PTR(src_nents);
	}

	if (likely(req->src == req->dst)) {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = src_nents;
		mapped_dst_nents = src_nents;
	} else {
		mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(jrdev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		dst_nents = sg_nents_for_len(req->dst, req->nbytes);
		if (unlikely(dst_nents < 0)) {
			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
				req->nbytes);
			return ERR_PTR(dst_nents);
		}

		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Check if the IV can be contiguous with the destination.
	 * If so, use it directly; if not, add the IV to the link table.
	 */
	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
	dst_sg_idx = sec4_sg_ents;
	if (mapped_dst_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->dst)) {
		out_contig = true;
	} else {
		out_contig = false;
		sec4_sg_ents += 1 + mapped_dst_nents;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	if (mapped_src_nents > 1)
		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
				   0);

	if (!out_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
				   iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
				   edesc->sec4_sg + dst_sg_idx + 1, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, 0, 0);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}
	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = out_contig;
	return edesc;
}
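
/*
 * Layout of the sec4_sg table built by ablkcipher_giv_edesc_alloc()
 * (a sketch; entry counts vary with the request):
 *
 *	[0 .. dst_sg_idx - 1]	source segments (present only when the
 *				mapped source has more than one segment)
 *	[dst_sg_idx]		the IV
 *	[dst_sg_idx + 1 ..]	destination segments, last entry flagged
 *
 * When the IV buffer ends exactly where a single-segment destination
 * begins, the output side needs no table entries at all and out_contig
 * is reported back to the caller instead.
 */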

static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
{
	struct ablkcipher_request *req = &creq->creq;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig = false;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
				edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
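
/*
 * Hypothetical caller-side sketch of the givencrypt path (error
 * handling elided; not part of this driver). The geniv layer supplies
 * the IV output buffer and sequence number, and completion is
 * asynchronous exactly as for the plain encrypt/decrypt entry points:
 *
 *	skcipher_givcrypt_set_tfm(greq, tfm);
 *	skcipher_givcrypt_set_callback(greq, flags, my_done, my_priv);
 *	skcipher_givcrypt_set_crypt(greq, src, dst, nbytes, iv);
 *	skcipher_givcrypt_set_giv(greq, giv_out, seq);
 *	err = crypto_skcipher_givencrypt(greq);
 */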

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
};

static struct caam_alg_template driver_algs[] = {
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "chainiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "rfc3686(ctr(aes))",
		.driver_name = "rfc3686-ctr-aes-caam",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.givencrypt = ablkcipher_givencrypt,
			.geniv = "<built-in>",
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = xts_ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
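
/*
 * Each template above is instantiated by caam_alg_alloc() at module
 * init and registered with the crypto API, so a user reaches it by
 * name; e.g. (hypothetical, not part of this driver):
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * binds to "cbc-aes-caam" whenever this driver's priority wins over
 * the software implementations.
 */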

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = gcm_encrypt,
			.decrypt = gcm_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "ecb(cipher_null))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "ecb-cipher_null-caam",
				.cra_blocksize = NULL_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-caam",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-caam",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(md5),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-md5-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha1),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc("
					    "hmac(sha224),rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha256),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha384),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "rfc3686(ctr(aes)))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "seqiv(authenc(hmac(sha512),"
					    "rfc3686(ctr(aes))))",
				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
						   "rfc3686-ctr-aes-caam",
				.cra_blocksize = 1,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.rfc3686 = true,
			.geniv = true,
		},
	},
};
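
/*
 * The entries above register directly as aead_alg instances (no
 * caam_alg_template indirection). Hypothetical user-side sketch (not
 * part of this driver):
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *
 * which resolves to "authenc-hmac-sha256-cbc-aes-caam" whenever CAAM's
 * priority wins over the software implementations.
 */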

struct caam_crypto_alg {
	struct crypto_alg crypto_alg;
	struct list_head entry;
	struct caam_alg_entry caam;
};

static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	dma_addr_t dma_addr;

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
					offsetof(struct caam_ctx,
						 sh_desc_enc_dma),
					DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_enc_dma = dma_addr;
	ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
						   sh_desc_dec);
	ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
						      sh_desc_givenc);
	ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	return 0;
}
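
/*
 * caam_init_common() maps the key and all three shared descriptors with
 * a single dma_map_single_attrs() call and derives the per-field DMA
 * handles by offset into struct caam_ctx:
 *
 *	dma_addr + offsetof(..., sh_desc_dec)    -> sh_desc_dec_dma
 *	dma_addr + offsetof(..., sh_desc_givenc) -> sh_desc_givenc_dma
 *	dma_addr + offsetof(..., key)            -> key_dma
 *
 * DMA_ATTR_SKIP_CPU_SYNC is usable here because the descriptors are
 * synced for device explicitly whenever they are rewritten.
 */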

static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg =
		 container_of(alg, struct caam_aead_alg, aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
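	/* Undo the one-shot key + shared descriptor mapping from init */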
	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
			       offsetof(struct caam_ctx, sh_desc_enc_dma),
			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
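
/*
 * GIVCIPHER templates carry their own IV generator (.givencrypt), so
 * they register with crypto_givcipher_type; plain ABLKCIPHER templates
 * instead name a generic geniv ("chainiv", "eseqiv") for the core to
 * wrap around them.
 */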

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}

static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;
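
	/*
	 * At this point {des,aes,md}_inst hold the instantiation count
	 * of each CHA type (0 means the block is absent) and md_limit
	 * caps the digest size usable on LP parts.
	 */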

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES modes not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
			     OP_ALG_AAI_XTS)
				continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
			if (alg_aai == OP_ALG_AAI_GCM)
				continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");