// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/aead.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* Bit offsets and masks for SEC sqe (BD) fields */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_AKEY_OFFSET         5
#define SEC_AEAD_ALG_OFFSET     11
#define SEC_AUTH_OFFSET		6

#define SEC_DE_OFFSET_V3		9
#define SEC_SCENE_OFFSET_V3	5
#define SEC_CKEY_OFFSET_V3	13
#define SEC_SRC_SGL_OFFSET_V3	11
#define SEC_DST_SGL_OFFSET_V3	14
#define SEC_CALG_OFFSET_V3	4
#define SEC_AKEY_OFFSET_V3	9
#define SEC_MAC_OFFSET_V3	4
#define SEC_AUTH_ALG_OFFSET_V3	15
#define SEC_CIPHER_AUTH_V3	0xbf
#define SEC_AUTH_CIPHER_V3	0x40
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001
#define SEC_ICV_MASK		0x000E
#define SEC_SQE_LEN_RATE_MASK	0x3

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CIPHER_AUTH		0xfe
#define SEC_AUTH_CIPHER		0x1
#define SEC_MAX_MAC_LEN		64
#define SEC_MAX_AAD_LEN		65535
#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)

#define SEC_PBUF_SZ			512
#define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
#define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
			SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
			SEC_PBUF_LEFT_SZ)

#define SEC_SQE_LEN_RATE	4
#define SEC_SQE_CFLAG		2
#define SEC_SQE_AEAD_FLAG	3
#define SEC_SQE_DONE		0x1
#define SEC_ICV_ERR		0x2
#define MIN_MAC_LEN		4
#define MAC_LEN_MASK		0x1U
#define MAX_INPUT_DATA_LEN	0xFFFE00
#define BITS_MASK		0xFF
#define BYTE_BITS		0x8
#define SEC_XTS_NAME_SZ		0x3
#define IV_CM_CAL_NUM		2
#define IV_CL_MASK		0x7
#define IV_CL_MIN		2
#define IV_CL_MID		4
#define IV_CL_MAX		8
#define IV_FLAGS_OFFSET	0x6
#define IV_CM_OFFSET		0x3
#define IV_LAST_BYTE1		1
#define IV_LAST_BYTE2		2
#define IV_LAST_BYTE_MASK	0xFF
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
				 ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
				 ctx->hlf_q_num;
}

static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

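/*
 * Reserve a per-queue request id (slot) from the qp_ctx IDR; the id
 * indexes both qp_ctx->req_list and the per-request DMA resources.
 */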
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);

	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (unlikely(req_id < 0)) {
		dev_err(req->ctx->dev, "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;
	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
		dev_err(req->ctx->dev, "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

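/*
 * Extract the done/ICV/flag/tag status fields from a finished v2 BD
 * and return its BD type so the caller can validate it.
 */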
static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
{
	struct sec_sqe *bd = resp;

	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd->type2.done_flag) &
					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le16_to_cpu(bd->type2.tag);
	status->err_type = bd->type2.error_type;

	return bd->type_cipher_auth & SEC_TYPE_MASK;
}

static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
{
	struct sec_sqe3 *bd3 = resp;

	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
	status->flag = (le16_to_cpu(bd3->done_flag) &
					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
	status->tag = le64_to_cpu(bd3->tag);
	status->err_type = bd3->error_type;

	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
}

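/*
 * Check one completion: the done bit and flag field for all requests,
 * plus the ICV (MAC verification) result for AEAD requests.
 */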
static int sec_cb_status_check(struct sec_req *req,
			       struct bd_status *status)
{
	struct sec_ctx *ctx = req->ctx;

	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
				    req->err_type, status->done);
		return -EIO;
	}

	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
					    status->flag);
			return -EIO;
		}
	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
			     status->icv == SEC_ICV_ERR)) {
			dev_err_ratelimited(ctx->dev,
					    "flag[%u], icv[%u]\n",
					    status->flag, status->icv);
			return -EBADMSG;
		}
	}

	return 0;
}

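/* Completion callback invoked by the QM for every finished BD */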
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
	u8 type_supported = qp_ctx->ctx->type_supported;
	struct bd_status status;
	struct sec_ctx *ctx;
	struct sec_req *req;
	int err;
	u8 type;

	if (type_supported == SEC_BD_TYPE2) {
		type = pre_parse_finished_bd(&status, resp);
		req = qp_ctx->req_list[status.tag];
	} else {
		type = pre_parse_finished_bd3(&status, resp);
		req = (void *)(uintptr_t)status.tag;
	}

	if (unlikely(type != type_supported)) {
		atomic64_inc(&dfx->err_bd_cnt);
		pr_err("err bd type [%d]\n", type);
		return;
	}

	if (unlikely(!req)) {
		atomic64_inc(&dfx->invalid_req_cnt);
		atomic_inc(&qp->qp_status.used);
		return;
	}

	req->err_type = status.err_type;
	ctx = req->ctx;
	err = sec_cb_status_check(req, &status);
	if (err)
		atomic64_inc(&dfx->done_flag_cnt);

	atomic64_inc(&dfx->recv_cnt);

	ctx->req_op->buf_unmap(ctx, req);

	ctx->req_op->callback(ctx, req, err);
}

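/*
 * Send one BD to hardware. Once more than fake_req_limit requests are
 * in flight, new requests are only accepted onto the backlog if
 * CRYPTO_TFM_REQ_MAY_BACKLOG is set; otherwise the caller sees -EBUSY.
 */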
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return -EBUSY;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);

	if (ctx->fake_req_limit <=
	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
		mutex_unlock(&qp_ctx->req_lock);
		return -EBUSY;
	}
	mutex_unlock(&qp_ctx->req_lock);

	if (unlikely(ret == -EBUSY))
		return -ENOBUFS;

	if (likely(!ret)) {
		ret = -EINPROGRESS;
		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
	}

	return ret;
}

/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->c_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->c_ivin, res->c_ivin_dma);
}

static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->a_ivin_dma, GFP_KERNEL);
	if (!res->a_ivin)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
	}

	return 0;
}

static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->a_ivin)
		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
				  res->a_ivin, res->a_ivin_dma);
}

static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	int i;

	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
					  &res->out_mac_dma, GFP_KERNEL);
	if (!res->out_mac)
		return -ENOMEM;

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].out_mac_dma = res->out_mac_dma +
				     i * (SEC_MAX_MAC_LEN << 1);
		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
	}

	return 0;
}

static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->out_mac)
		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
				  res->out_mac, res->out_mac_dma);
}

static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	if (res->pbuf)
		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
				  res->pbuf, res->pbuf_dma);
}

/*
 * To improve performance, a pbuf is used for small packets
 * (< 512 bytes) to avoid the cost of IOMMU translation.
 */
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
	int pbuf_page_offset;
	int i, j, k;

	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
				&res->pbuf_dma, GFP_KERNEL);
	if (!res->pbuf)
		return -ENOMEM;

	/*
	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
	 * Every page holds SEC_PBUF_NUM such packages and the
	 * sec_qp_ctx needs QM_Q_DEPTH of them in total, so
	 * SEC_PBUF_PAGE_NUM pages plus a leftover area make up
	 * SEC_TOTAL_PBUF_SZ.
	 */
	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
		pbuf_page_offset = PAGE_SIZE * i;
		for (j = 0; j < SEC_PBUF_NUM; j++) {
			k = i * SEC_PBUF_NUM + j;
			if (k == QM_Q_DEPTH)
				break;
			res[k].pbuf = res->pbuf +
				j * SEC_PBUF_PKG + pbuf_page_offset;
			res[k].pbuf_dma = res->pbuf_dma +
				j * SEC_PBUF_PKG + pbuf_page_offset;
		}
	}
	return 0;
}

static int sec_alg_resource_alloc(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct sec_alg_res *res = qp_ctx->res;
	struct device *dev = ctx->dev;
	int ret;

	ret = sec_alloc_civ_resource(dev, res);
	if (ret)
		return ret;

	if (ctx->alg_type == SEC_AEAD) {
		ret = sec_alloc_aiv_resource(dev, res);
		if (ret)
			goto alloc_aiv_fail;

		ret = sec_alloc_mac_resource(dev, res);
		if (ret)
			goto alloc_mac_fail;
	}
	if (ctx->pbuf_supported) {
		ret = sec_alloc_pbuf_resource(dev, res);
		if (ret) {
			dev_err(dev, "fail to alloc pbuf dma resource!\n");
			goto alloc_pbuf_fail;
		}
	}

	return 0;

alloc_pbuf_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
alloc_mac_fail:
	if (ctx->alg_type == SEC_AEAD)
		sec_free_aiv_resource(dev, res);
alloc_aiv_fail:
	sec_free_civ_resource(dev, res);
	return ret;
}

static void sec_alg_resource_free(struct sec_ctx *ctx,
				  struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	sec_free_civ_resource(dev, qp_ctx->res);

	if (ctx->pbuf_supported)
		sec_free_pbuf_resource(dev, qp_ctx->res);
	if (ctx->alg_type == SEC_AEAD)
		sec_free_mac_resource(dev, qp_ctx->res);
}

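/* Set up one queue pair context: SGL pools, DMA resources and the QP */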
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = ctx->dev;
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp = ctx->qps[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	qp->req_cb = sec_req_cb;

	mutex_init(&qp_ctx->req_lock);
	idr_init(&qp_ctx->req_idr);
	INIT_LIST_HEAD(&qp_ctx->backlog);

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_destroy_idr;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = sec_alg_resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = ctx->dev;

	hisi_qm_stop_qp(qp_ctx->qp);
	sec_alg_resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
}

static int sec_ctx_base_init(struct sec_ctx *ctx)
{
	struct sec_dev *sec;
	int i, ret;

	ctx->qps = sec_create_qps();
	if (!ctx->qps) {
		pr_err("Can not create sec qps!\n");
		return -ENODEV;
	}

	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
	ctx->sec = sec;
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx) {
		ret = -ENOMEM;
		goto err_destroy_qps;
	}

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	return 0;
err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
	kfree(ctx->qp_ctx);
err_destroy_qps:
	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
	return ret;
}

static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
	kfree(ctx->qp_ctx);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  c_ctx->c_key, c_ctx->c_key_dma);
}

static int sec_auth_init(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
					  &a_ctx->a_key_dma, GFP_KERNEL);
	if (!a_ctx->a_key)
		return -ENOMEM;

	return 0;
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
			  a_ctx->a_key, a_ctx->a_key_dma);
}

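/*
 * Only XTS algorithms get a software fallback tfm here; it is used
 * when the key length (SEC_XTS_MID_KEY_SIZE) is not handled by the
 * hardware, see sec_skcipher_aes_sm4_setkey().
 */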
static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
{
	const char *alg = crypto_tfm_alg_name(&tfm->base);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;

	c_ctx->fallback = false;
	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
		return 0;

	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(c_ctx->fbtfm)) {
		pr_err("failed to alloc fallback tfm!\n");
		return PTR_ERR(c_ctx->fbtfm);
	}

	return 0;
}

static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ctx->alg_type = SEC_SKCIPHER;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error skcipher iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	ret = sec_skcipher_fbtfm_init(tfm);
	if (ret)
		goto err_fbtfm_init;

	return 0;

err_fbtfm_init:
	sec_cipher_uninit(ctx);
err_cipher_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->c_ctx.fbtfm)
		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);

	sec_cipher_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	ret = verify_skcipher_des3_key(tfm, key);
	if (ret)
		return ret;

	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MID_KEY_SIZE:
			c_ctx->fallback = true;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		if (c_ctx->c_alg == SEC_CALG_SM4 &&
		    keylen != AES_KEYSIZE_128) {
			pr_err("hisi_sec2: sm4 key error!\n");
			return -EINVAL;
		} else {
			switch (keylen) {
			case AES_KEYSIZE_128:
				c_ctx->c_key_len = SEC_CKEY_128BIT;
				break;
			case AES_KEYSIZE_192:
				c_ctx->c_key_len = SEC_CKEY_192BIT;
				break;
			case AES_KEYSIZE_256:
				c_ctx->c_key_len = SEC_CKEY_256BIT;
				break;
			default:
				pr_err("hisi_sec2: aes key error!\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(dev, "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);
	if (c_ctx->fallback) {
		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
		if (ret) {
			dev_err(dev, "failed to set fallback skcipher key!\n");
			return ret;
		}
	}
	return 0;
}

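/*
 * Generate one setkey helper per cipher/mode pair. For example,
 * GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) expands to:
 *
 *	static int sec_setkey_aes_ecb(struct crypto_skcipher *tfm,
 *				      const u8 *key, u32 keylen)
 *	{
 *		return sec_skcipher_setkey(tfm, key, keylen,
 *					   SEC_CALG_AES, SEC_CMODE_ECB);
 *	}
 */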
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)

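/*
 * Copy the request's source data into the pre-allocated pbuf so small
 * packets avoid SGL mapping; for AEAD decryption the input MAC is also
 * saved for later verification.
 */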
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
			struct scatterlist *src)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aead_req = a_req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct device *dev = ctx->dev;
	int copy_size, pbuf_length;
	int req_id = req->req_id;
	struct crypto_aead *tfm;
	size_t authsize;
	u8 *mac_offset;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = aead_req->cryptlen + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
			qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size)) {
		dev_err(dev, "copy src data to pbuf error!\n");
		return -EINVAL;
	}
	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		tfm = crypto_aead_reqtfm(aead_req);
		authsize = crypto_aead_authsize(tfm);
		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
		memcpy(a_req->out_mac, mac_offset, authsize);
	}

	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
	c_req->c_out_dma = c_req->c_in_dma;

	return 0;
}

static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
			struct scatterlist *dst)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int copy_size, pbuf_length;
	int req_id = req->req_id;

	if (ctx->alg_type == SEC_AEAD)
		copy_size = c_req->c_len + aead_req->assoclen;
	else
		copy_size = c_req->c_len;

	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
			qp_ctx->res[req_id].pbuf, copy_size);
	if (unlikely(pbuf_length != copy_size))
		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
}

static int sec_aead_mac_init(struct sec_aead_req *req)
{
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;
	size_t copy_size;
	off_t skip_size;

	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
		return -EINVAL;

	return 0;
}

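/*
 * Map IV/MAC resources and the src/dst scatterlists for one request,
 * either through the pbuf fast path or through the hardware SGL pools.
 */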
static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
			  struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
	struct device *dev = ctx->dev;
	int ret;

	if (req->use_pbuf) {
		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
		if (ctx->alg_type == SEC_AEAD) {
			a_req->a_ivin = res->a_ivin;
			a_req->a_ivin_dma = res->a_ivin_dma;
			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
			a_req->out_mac_dma = res->pbuf_dma +
					SEC_PBUF_MAC_OFFSET;
		}
		ret = sec_cipher_pbuf_map(ctx, req, src);

		return ret;
	}
	c_req->c_ivin = res->c_ivin;
	c_req->c_ivin_dma = res->c_ivin_dma;
	if (ctx->alg_type == SEC_AEAD) {
		a_req->a_ivin = res->a_ivin;
		a_req->a_ivin_dma = res->a_ivin_dma;
		a_req->out_mac = res->out_mac;
		a_req->out_mac_dma = res->out_mac_dma;
	}

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);

	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
		ret = sec_aead_mac_init(a_req);
		if (unlikely(ret)) {
			dev_err(dev, "fail to init mac data for ICV!\n");
			return ret;
		}
	}
	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);

		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
			     struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct device *dev = ctx->dev;

	if (req->use_pbuf) {
		sec_cipher_pbuf_unmap(ctx, req, dst);
	} else {
		if (dst != src)
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
	}
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	return sec_cipher_map(ctx, req, sq->src, sq->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sq = req->c_req.sk_req;

	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}

static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
				struct crypto_authenc_keys *keys)
{
	switch (keys->enckeylen) {
	case AES_KEYSIZE_128:
		c_ctx->c_key_len = SEC_CKEY_128BIT;
		break;
	case AES_KEYSIZE_192:
		c_ctx->c_key_len = SEC_CKEY_192BIT;
		break;
	case AES_KEYSIZE_256:
		c_ctx->c_key_len = SEC_CKEY_256BIT;
		break;
	default:
		pr_err("hisi_sec2: aead aes key error!\n");
		return -EINVAL;
	}
	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);

	return 0;
}

static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
				 struct crypto_authenc_keys *keys)
{
	struct crypto_shash *hash_tfm = ctx->hash_tfm;
	int blocksize, digestsize, ret;

	if (!keys->authkeylen) {
		pr_err("hisi_sec2: aead auth key error!\n");
		return -EINVAL;
	}

	blocksize = crypto_shash_blocksize(hash_tfm);
	digestsize = crypto_shash_digestsize(hash_tfm);
	if (keys->authkeylen > blocksize) {
		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
					      keys->authkeylen, ctx->a_key);
		if (ret) {
			pr_err("hisi_sec2: aead auth digest error!\n");
			return -EINVAL;
		}
		ctx->a_key_len = digestsize;
	} else {
		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
		ctx->a_key_len = keys->authkeylen;
	}

	return 0;
}

static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;

	if (unlikely(a_ctx->fallback_aead_tfm))
		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

	return 0;
}

static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
				    struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
}

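/*
 * AEAD setkey: CCM/GCM take a plain cipher key, while the authenc
 * algorithms split the key into cipher and auth parts via
 * crypto_authenc_extractkeys().
 */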
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	int ret;

	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;

	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		if (ret) {
			dev_err(dev, "set sec aes ccm cipher key err!\n");
			return ret;
		}
		memcpy(c_ctx->c_key, key, keylen);

		if (unlikely(a_ctx->fallback_aead_tfm)) {
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			if (ret)
				return ret;
		}

		return 0;
	}

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	ret = sec_aead_aes_set_key(c_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec cipher key err!\n");
		goto bad_key;
	}

	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
	if (ret) {
		dev_err(dev, "set sec auth key err!\n");
		goto bad_key;
	}

	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK)  ||
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		dev_err(dev, "MAC or AUTH key length error!\n");
		goto bad_key;
	}

	return 0;

bad_key:
	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
	return -EINVAL;
}
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
	u32 keylen)							\
{									\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
}

GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)

static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	return sec_cipher_map(ctx, req, aq->src, aq->dst);
}

static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aq = req->aead_req.aead_req;

	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (unlikely(ret))
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (unlikely(ret))
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

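/* Fill a v2 BD (sec_sqe) for a symmetric cipher request */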
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	/* Set destination and source address type */
	if (req->use_pbuf) {
		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
	} else {
		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	}

	sec_sqe->sdm_addr_type |= da_type;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 bd_param = 0;
	u16 cipher;

	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));

	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe3->data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
						c_ctx->c_mode;
	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						SEC_CKEY_OFFSET_V3);

	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC;
	else
		cipher = SEC_CIPHER_DEC;
	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);

	if (req->use_pbuf) {
		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
	} else {
		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
	}

	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
	if (c_req->c_in_dma != c_req->c_out_dma)
		bd_param |= 0x1 << SEC_DE_OFFSET_V3;

	bd_param |= SEC_BD_TYPE3;
	sec_sqe3->bd_param = cpu_to_le32(bd_param);

	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
	sec_sqe3->tag = cpu_to_le64(req);

	return 0;
}

/* increment counter (128-bit int) */
static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
{
	do {
		--bits;
		nums += counter[bits];
		counter[bits] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (bits && nums);
}

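/*
 * Save the next IV after completion: CBC copies the last ciphertext
 * block, CTR advances the counter by the number of processed blocks,
 * e.g. cryptlen == 3.5 * iv_size increments the counter by 4.
 */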
static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	unsigned int cryptlen;
	size_t sz;
	u8 *iv;

	if (req->c_req.encrypt)
		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
	else
		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;

	if (alg_type == SEC_SKCIPHER) {
		iv = sk_req->iv;
		cryptlen = sk_req->cryptlen;
	} else {
		iv = aead_req->iv;
		cryptlen = aead_req->cryptlen;
	}
	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
					cryptlen - iv_size);
		if (unlikely(sz != iv_size))
			dev_err(req->ctx->dev, "copy output iv error!\n");
	} else {
		sz = cryptlen / iv_size;
		if (cryptlen % iv_size)
			sz += 1;
		ctr_iv_inc(iv, iv_size, sz);
	}
}

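/* Pop one backlogged request once the queue has drained enough */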
static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
				struct sec_qp_ctx *qp_ctx)
{
	struct sec_req *backlog_req = NULL;

	mutex_lock(&qp_ctx->req_lock);
	if (ctx->fake_req_limit >=
	    atomic_read(&qp_ctx->qp->qp_status.used) &&
	    !list_empty(&qp_ctx->backlog)) {
		backlog_req = list_first_entry(&qp_ctx->backlog,
				typeof(*backlog_req), backlog_head);
		list_del(&backlog_req->backlog_head);
	}
	mutex_unlock(&qp_ctx->req_lock);

	return backlog_req;
}

static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
				  int err)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct skcipher_request *backlog_sk_req;
	struct sec_req *backlog_req;

	sec_free_req_id(req);

	/* Update the output IV after encryption in CBC/CTR mode */
	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
		sec_update_iv(req, SEC_SKCIPHER);

	while (1) {
		backlog_req = sec_back_req_clear(ctx, qp_ctx);
		if (!backlog_req)
			break;

		backlog_sk_req = backlog_req->c_req.sk_req;
		backlog_sk_req->base.complete(&backlog_sk_req->base,
						-EINPROGRESS);
		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
	}

	sk_req->base.complete(&sk_req->base, err);
}

static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	u32 data_size = aead_req->cryptlen;
	u8 flage = 0;
	u8 cm, cl;

	/* the specification has been checked in aead_iv_demension_check() */
	cl = c_req->c_ivin[0] + 1;
	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;

	/* the last 3bit is L' */
	flage |= c_req->c_ivin[0] & IV_CL_MASK;

	/* the M' is bit3~bit5, the Flags is bit6 */
	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
	flage |= cm << IV_CM_OFFSET;
	if (aead_req->assoclen)
		flage |= 0x01 << IV_FLAGS_OFFSET;

	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
	a_req->a_ivin[0] = flage;

	/*
	 * The last 32 bits hold the counter's initial value, but the
	 * nonce only uses the first 16 of them; the tail 16 bits are
	 * filled with the cipher text length.
	 */
	if (!c_req->encrypt)
		data_size = aead_req->cryptlen - authsize;

	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
			data_size & IV_LAST_BYTE_MASK;
	data_size >>= IV_BYTE_OFFSET;
	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
			data_size & IV_LAST_BYTE_MASK;
}

static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;

	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
		 * the counter must be set to 0x01
		 */
		ctx->a_ctx.mac_len = authsize;
		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
		set_aead_auth_iv(ctx, req);
	}

	/* GCM 12Byte Cipher_IV == Auth_IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		ctx->a_ctx.mac_len = authsize;
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
	}
}

static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
			       struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg =
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}
	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}
	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}

static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
					  aead_req->out_mac,
					  authsize, a_req->cryptlen +
					  a_req->assoclen);

		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		backlog_aead_req->base.complete(&backlog_aead_req->base,
						-EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	a_req->base.complete(&a_req->base, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}

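/*
 * Common request path: reserve ids, map buffers, fill the BD and send
 * it; on send failure the IV and all resources are restored/released.
 */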
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Save the output IV before decryption in CBC/CTR mode */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}

static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}

static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

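/*
 * CCM/GCM ("xcm") contexts also allocate a fallback AEAD tfm for request
 * shapes the hardware cannot process, e.g. zero-length input on HW V2.
 */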
static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	a_ctx->fallback = false;

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}

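/*
 * Per-mode length rules: XTS needs at least one AES block, ECB/CBC need
 * block-aligned input, and the stream modes (CFB, OFB, CTR) are only
 * offloaded on HW V3.
 */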
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CFB:
	case SEC_CMODE_OFB:
	case SEC_CMODE_CTR:
		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
			dev_err(dev, "skcipher HW version error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst ||
		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}
	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}

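/* Synchronous software fallback on the pre-allocated sync skcipher tfm */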
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct device *dev = ctx->dev;
	int ret;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);

	if (!c_ctx->fbtfm) {
		dev_err(dev, "failed to check fallback tfm\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* The software fallback must run in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

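/*
 * Main skcipher entry point: zero-length requests complete immediately
 * (invalid for XTS), fallback-flagged contexts take the software path,
 * everything else is queued to the hardware.
 */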
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;

		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_ALLOCATES_MEMORY |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

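/*
 * For example, SEC_SKCIPHER_ALG("cbc(aes)", ...) yields an skcipher_alg
 * named "cbc(aes)" with driver name "hisi_sec_cbc(aes)" and the default
 * sec_skcipher_ctx_init/sec_skcipher_ctx_exit hooks.
 */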
static struct skcipher_alg sec_skciphers[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

static struct skcipher_alg sec_skciphers_v3[] = {
	SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
};

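/*
 * CCM (RFC 3610) stores L' = L - 1 in iv[0], where L is the byte size of
 * the message-length field; L must lie in [2, 8] and cryptlen must fit
 * in L bytes. L >= 4 always holds a 32-bit length, so only the short
 * cases need the overflow check.
 */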
static int aead_iv_dimension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}

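/*
 * Validate AEAD limits: total input and AAD length caps, per-mode MAC
 * length bounds (GCM needs at least 8 bytes, CCM an even length of at
 * least 4), the CCM IV dimension, and block alignment for CBC authenc.
 */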
static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
		authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		ret = aead_iv_dimension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			dev_err(dev, "Kunpeng920 does not support 0 length!\n");
			ctx->a_ctx.fallback = true;
			return -EINVAL;
		}
	}

	/* Support AES or SM4 */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}

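/* Hand the request over to the fallback AEAD tfm */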
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct aead_request *subreq = aead_request_ctx(aead_req);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;

	/* Kunpeng920 AEAD mode does not support zero-size input */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	return encrypt ? crypto_aead_encrypt(subreq) :
		   crypto_aead_decrypt(subreq);
}

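/* Validate the request, then queue it or divert it to the fallback tfm */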
static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);

		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
			 ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_ALLOCATES_MEMORY |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

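/* AEAD algorithms for all HW versions; SM4 CCM/GCM below need HW V3 */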
static struct aead_alg sec_aeads[] = {
	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
		     AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
		     AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),

	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
		     AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),

	SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),

	SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
};

static struct aead_alg sec_aeads_v3[] = {
	SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),

	SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
};

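/*
 * Register the base algorithms unconditionally and the HW V3-only ones
 * when supported; unwind in reverse order on failure.
 */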
int sec_register_to_crypto(struct hisi_qm *qm)
{
	int ret;

	/* To avoid repeated registration */
	ret = crypto_register_skciphers(sec_skciphers,
					ARRAY_SIZE(sec_skciphers));
	if (ret)
		return ret;
	if (qm->ver > QM_HW_V2) {
		ret = crypto_register_skciphers(sec_skciphers_v3,
						ARRAY_SIZE(sec_skciphers_v3));
		if (ret)
			goto reg_skcipher_fail;
	}

	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
	if (ret)
		goto reg_aead_fail;

	if (qm->ver > QM_HW_V2) {
		ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
		if (ret)
			goto reg_aead_v3_fail;
	}

	return ret;

reg_aead_v3_fail:
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
reg_aead_fail:
	if (qm->ver > QM_HW_V2)
		crypto_unregister_skciphers(sec_skciphers_v3,
					    ARRAY_SIZE(sec_skciphers_v3));
reg_skcipher_fail:
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	if (qm->ver > QM_HW_V2)
		crypto_unregister_aeads(sec_aeads_v3,
					ARRAY_SIZE(sec_aeads_v3));
	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));

	if (qm->ver > QM_HW_V2)
		crypto_unregister_skciphers(sec_skciphers_v3,
					    ARRAY_SIZE(sec_skciphers_v3));
	crypto_unregister_skciphers(sec_skciphers,
				    ARRAY_SIZE(sec_skciphers));
}