// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* Bit offsets and masks used when building the SEC sqe (BD) */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET         5
#define SEC_AEAD_ALG_OFFSET     11
#define SEC_AUTH_OFFSET		6

#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
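/*
 * Applied to sds_sa_type in sec_auth_bd_fill_ex(): ANDing with
 * SEC_CIPHER_AUTH clears bit 0 on the encrypt path, ORing with
 * SEC_AUTH_CIPHER sets it on the decrypt path.
 */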
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

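/* A pbuf package is the 512-byte data area followed by the IV and two MAC-sized regions */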
#define SEC_PBUF_SZ			512
#define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)

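/* Auth key and MAC lengths are programmed into the BD in 4-byte units (SEC_SQE_LEN_RATE) */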
#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1

/* Pick an encrypt or decrypt queue cyclically to balance load over the TFM's queues */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

static int sec_aead_verify(struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->aead_req.out_mac;
	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
	struct scatterlist *sgl = aead_req->src;
	size_t sz;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
				aead_req->cryptlen + aead_req->assoclen -
				authsize);
	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
		dev_err(req->ctx->dev, "aead verify failure!\n");
		return -EBADMSG;
	}

	return 0;
}

static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	struct sec_sqe *bd = resp;
	struct sec_ctx *ctx;
	struct sec_req *req;
	u16 done, flag;
	int err = 0;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (unlikely(type != SEC_BD_TYPE2)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

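	/* The BD tag field carries the request id assigned at submit time */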
	req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}
	req->err_type = bd->type2.error_type;
	ctx = req->ctx;
	done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	flag = (le16_to_cpu(bd->type2.done_flag) &
		SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	if (unlikely(req->err_type || done != SEC_SQE_DONE ||
	    (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) ||
	    (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) {
		dev_err_ratelimited(ctx->dev,
			"err_type[%d],done[%d],flag[%d]\n",
			req->err_type, done, flag);
		err = -EIO;
		atomic64_inc(&dfx->done_flag_cnt);
	}

	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
		err = sec_aead_verify(req);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

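	/* Reject immediately if the queue is fake-busy and the caller cannot backlog */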
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

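	/*
	 * The BD was queued, but the queue has crossed the fake-busy
	 * threshold: park the request on the backlog list and return
	 * -EBUSY so the caller treats it as backlogged.
	 */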
	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

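	/* Slice the single coherent allocation into per-request IV buffers */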
	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pre-allocated pbuf is used for small
 * packets (< 512 bytes): the data is copied into it instead of being
 * mapped through the IOMMU for every request.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				&res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, IV and out_mac:
	 * <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM packages and the sec_qp_ctx needs
	 * QM_Q_DEPTH of them in total, so SEC_PBUF_PAGE_NUM pages plus the
	 * leftover area make up SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}

	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of the queue depth is taken as the fake-busy limit for in-flight requests. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return 0;

err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			struct scatterlist *src)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
							qp_ctx->res[req_id].pbuf,
							copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;

	if (!c_req->c_in_dma) {
		dev_err(dev, "fail to set pbuffer address!\n");
		return -ENOMEM;
	}

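	/* pbuf mode works in place: the output overwrites the copied input */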
	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
			struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
				qp_ctx->res[req_id].pbuf,
				copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(dev, "copy pbuf data to dst error!\n");
}

static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		ret = sec_cipher_pbuf_map(ctx, req, src);
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
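	/* HMAC keys longer than the block size are first hashed down to the digest size */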
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}


#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	if (req->use_pbuf)
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
	else
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	if (req->use_pbuf)
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	else
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}

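	/* The chained IV is the last ciphertext block: copy it out of the data */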
	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
				cryptlen - iv_size);
	if (unlikely(sz != iv_size))
		dev_err(req->ctx->dev, "copy output iv error!\n");
}

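/* Pop one backlogged request once the queue drops below the fake-busy limit */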
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Output the updated IV after CBC-mode encryption */
	if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
			       struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* For CBC-mode decryption, output the IV before the data is processed */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user IV that was updated above */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_copy_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_skcipher_req_ops;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		dev_err(ctx->dev, "get error aead iv size!\n");
		return -EINVAL;
	}

	ctx->req_op = &sec_aead_req_ops;
	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}
	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
		req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	/* Support AES only */
	if (unlikely(c_alg != SEC_CALG_AES)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;

	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		dev_err(dev, "aead crypto length error!\n");
		return -EINVAL;
	}

	return 0;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
			sec_aead_ctx_exit, blksize, ivsize, authsize)

static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
};

int sec_register_to_crypto(struct hisi_qm *qm)
{
	int ret;

	/* To avoid repeat register */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		crypto_unregister_skciphers(sec_skciphers,
					    ARRAY_SIZE(sec_skciphers));
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
}