/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8           *in_key,
			      unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;

}

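/* For the rfc4309 variant the key material handed to setkey is the AES key
 * followed by a 3-byte nonce (salt), as defined in RFC 4309; the salt is
 * stashed in the context here and later becomes bytes 1-3 of the CCM nonce.
 */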
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8           *in_key,
				  unsigned int        key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* taken from crypto/ccm.c */
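/* Write msglen big-endian into the csize-byte length field at 'block' (the
 * tail of B0). Only the low four bytes can ever be non-zero since msglen is
 * 32 bits, so csize is clamped to 4; for smaller length fields, messages
 * larger than 2^(8*csize) bytes are rejected with -EOVERFLOW.
 * Example: msglen = 0x1234 with csize = 4 stores 00 00 12 34.
 */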
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

/* taken from crypto/ccm.c */
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

/* based on code from crypto/ccm.c */
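/* B0 layout per NIST SP 800-38C / RFC 3610: the flags byte carries L'
 * (iv[0], already in place from the copied IV) in bits 0-2, (authsize - 2)/2
 * in bits 3-5 and the Adata bit (0x40) in bit 6; the last L = L' + 1 bytes
 * hold the message length. For example, iv[0] = 3 (a 4-byte length field),
 * an 8-byte tag and non-empty AAD give a flags byte of
 * 0x40 | 0x18 | 0x03 = 0x5b.
 */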
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

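/* Compute the partial authentication tag (PAT) over B0, B1 and any
 * associated data. Small amounts of AAD (<= 14 bytes) are packed into B1 and
 * hashed in a single pass; larger AAD is streamed through the AES CCA
 * coprocessor operation in databytelen-sized chunks.
 */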
static int generate_pat(u8                   *iv,
			struct aead_request  *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int          authsize,
			unsigned int          nbytes,
			unsigned int          assoclen,
			u8                   *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

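	/* B1 encodes the AAD length as specified in SP 800-38C: a two-byte
	 * count when assoclen is below 2^16 - 2^8, otherwise the marker
	 * 0xfffe followed by a four-byte count, with the first AAD bytes
	 * packed into the remainder of the block.
	 */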
	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do one CCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that it's a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
				nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
				AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

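/* Decrypt path: the received tag is copied out of the source scatterlist
 * first, the PAT is regenerated, then the payload is run through the CCM
 * unit in as many passes as the scatter/gather and length limits require,
 * chaining passes with the INTERMEDIATE/CONTINUATION flags; the recomputed
 * MAC is finally compared with the saved tag and -EBADMSG returned on
 * mismatch.
 */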
static int ccm_nx_decrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {

		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
		    authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

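/* Encrypt path: generate the PAT, then loop over the plaintext in chunks the
 * hardware can accept, chaining passes with the INTERMEDIATE/CONTINUATION
 * flags, and append the resulting MAC after the ciphertext in the
 * destination scatterlist.
 */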
static int ccm_nx_encrypt(struct aead_request   *req,
			  struct blkcipher_desc *desc,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
			csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
			csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

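	/* RFC 4309 nonce: iv[0] = 3 is L' = L - 1, giving the 4-byte
	 * length/counter field that goes with an 11-byte nonce made of the
	 * 3-byte salt saved at setkey time plus the 8-byte per-request IV. */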
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;

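	/* req->assoclen counts the 8-byte explicit IV carried with the
	 * request; it forms part of the nonce rather than the AAD, so only
	 * assoclen - 8 bytes are authenticated as associated data. */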
	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, &desc, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct blkcipher_desc desc;
	u8 *iv = rctx->iv;

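	/* Same nonce construction and assoclen adjustment as in
	 * ccm4309_aes_nx_encrypt(). */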
	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	desc.info = iv;

	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	struct blkcipher_desc desc;
	int rc;

	desc.info = req->iv;

	rc = crypto_ccm_check_iv(desc.info);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, &desc, req->assoclen);
}

/* tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
 * but instead uses this tfm->blocksize. */
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};
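
/*
 * Illustrative sketch only, not part of the driver: how a kernel user would
 * typically reach these transforms through the generic AEAD API (error
 * handling omitted; src_sg/dst_sg are assumed to be laid out as AAD || text,
 * with room for the tag appended on encryption):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 8);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * Here iv is the 16-byte CCM IV with iv[0] = L' as checked by
 * crypto_ccm_check_iv() above; for the rfc4309 variant the caller instead
 * supplies an 8-byte IV and a key with the 3-byte salt appended.
 */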