Commit ab1c1db8 authored by lingmingqiang, committed by Xie XiuQi

ACC: add sec fusion code

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Feature or Bugfix: Feature
Signed-off-by: Zhangwei <zhangwei375@huawei.com>
Reviewed-by: hucheng.hu <hucheng.hu@huawei.com>
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 2db79a68
......@@ -32,12 +32,27 @@ enum hisi_sec_status {
HISI_SEC_RESET,
};
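/* DFX statistics counters, exported read-only through debugfs. */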
struct hisi_sec_dfx {
u64 send_cnt;
u64 recv_cnt;
u64 get_task_cnt;
u64 put_task_cnt;
u64 gran_task_cnt;
u64 thread_cnt;
u64 fake_busy_cnt;
u64 busy_comp_cnt;
u64 sec_ctrl;
};
struct hisi_sec {
struct hisi_qm qm;
struct list_head list;
struct hisi_sec_dfx sec_dfx;
struct hisi_sec_ctrl *ctrl;
struct dma_pool *sgl_pool;
int ctx_q_num;
int fusion_limit;
int fusion_tmout_usec;
unsigned long status;
};
......
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2019 HiSilicon Limited.
* Copyright (c) 2018-2019 HiSilicon Limited.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -24,16 +24,70 @@
#include "sec_crypto.h"
#define SEC_ASYNC
#define SEC_INVLD_REQ_ID -1
#define SEC_INVLD_REQ_ID (-1)
#define SEC_PRIORITY (4001)
#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
// #define USE_DM_CRYPT_OPTIMIZE
#define SEC_FUSION_BD
#define SEC_DEBUG
#ifdef SEC_DEBUG
#define dbg(msg, ...) pr_info(msg, ##__VA_ARGS__)
#define dbg(msg, ...) pr_err(msg, ##__VA_ARGS__)
#else
#define dbg(msg, ...)
#endif
enum {
SEC_NO_FUSION = 0x0,
SEC_IV_FUSION = 0x1,
SEC_FUSION_BUTT
};
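/*
 * Index into sec_req_ops_tbl[]: plain skcipher, dm-crypt storage,
 * or multi-IV fusion processing path.
 */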
enum SEC_REQ_OPS_TYPE {
SEC_OPS_SKCIPHER_ALG = 0x0,
SEC_OPS_DMCRYPT = 0x1,
SEC_OPS_MULTI_IV = 0x2,
SEC_OPS_BUTT
};
enum cipher_flags {
CRYPT_MODE_INTEGRITY_AEAD,
CRYPT_IV_LARGE_SECTORS,
};
enum setkey_op {
SETKEY_OP_INIT,
SETKEY_OP_SET,
SETKEY_OP_WIPE,
};
struct geniv_key_info {
enum setkey_op keyop;
unsigned int tfms_count;
u8 *key;
char *ivopts;
sector_t iv_offset;
unsigned long cipher_flags;
unsigned short int sector_size;
unsigned int key_size;
unsigned int key_parts;
unsigned int key_mac_size;
unsigned int on_disk_tag_size;
};
struct geniv_req_info {
sector_t cc_sector;
unsigned int nents;
u8 *integrity_metadata;
};
struct hisi_sec_cipher_req {
struct acc_hw_sgl *c_in;
dma_addr_t c_in_dma;
......@@ -41,7 +95,12 @@ struct hisi_sec_cipher_req {
dma_addr_t c_out_dma;
u8 *c_ivin;
dma_addr_t c_ivin_dma;
struct skcipher_request *sk_req;
struct scatterlist *src;
struct scatterlist *dst;
u32 c_len;
u32 gran_num;
u64 lba;
bool encrypt;
};
......@@ -52,16 +111,32 @@ struct hisi_sec_req {
struct hisi_sec_sqe sec_sqe;
struct hisi_sec_ctx *ctx;
struct hisi_sec_qp_ctx *qp_ctx;
struct skcipher_request *sk_req;
void **priv;
struct hisi_sec_cipher_req c_req;
int err;
int err_type;
int req_id;
bool fake_busy;
int req_cnt;
int fusion_num;
int fake_busy;
};
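/*
 * Per-path request operations. Each stage of request handling (resource
 * allocation, SGL mapping, IV/LBA transfer, BD filling, BD sending and
 * completion) is a callback, so the common sec_io_proc() flow can serve
 * the normal, dm-crypt and fusion paths alike.
 */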
struct hisi_sec_req_op {
int fusion_type;
int (*alloc)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*free)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*buf_map)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*buf_unmap)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*do_transfer)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*bd_fill)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*bd_send)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
int (*callback)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
};
struct hisi_sec_cipher_ctx {
u8 *c_key;
dma_addr_t c_key_dma;
sector_t iv_offset;
u32 c_gran_size;
u8 c_mode;
u8 c_alg;
u8 c_key_len;
......@@ -70,84 +145,58 @@ struct hisi_sec_cipher_ctx {
struct hisi_sec_qp_ctx {
struct hisi_qp *qp;
struct hisi_sec_req **req_list;
struct hisi_sec_req *fusion_req;
unsigned long *req_bitmap;
spinlock_t req_lock;
atomic_t req_cnt;
struct hisi_sec_sqe *sqe_list;
struct delayed_work work;
int work_cnt;
int fusion_num;
};
struct hisi_sec_ctx {
struct hisi_sec_qp_ctx *qp_ctx;
struct hisi_sec *sec;
struct device *sec_dev;
struct hisi_sec_req_op *req_op;
atomic_t thread_cnt;
int max_thread_cnt;
int req_fake_limit;
int req_limit;
int q_num;
atomic_t q_id;
int enc_q_num;
atomic_t enc_qid;
atomic_t dec_qid;
struct hisi_sec_cipher_ctx c_ctx;
int fusion_tmout_usec;
int fusion_limit;
u64 fusion_cnt;
bool is_fusion;
};
static void dump_data(unsigned char *buf, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i += 8)
dbg("0x%llx: \t%02x %02x %02x %02x %02x %02x %02x %02x\n",
(unsigned long long)(buf + i),
*(buf + i), (*(buf + i + 1)),
*(buf + i + 2), *(buf + i + 3),
*(buf + i + 4), *(buf + i + 5),
*(buf + i + 6), *(buf + i + 7));
dbg("\n");
}
static void dump_sec_bd(unsigned int *bd)
{
unsigned int i;
for (i = 0; i < 32; i++)
dbg("Word[%d] 0x%08x\n", i, bd[i]);
dbg("\n");
}
#define DES_WEAK_KEY_NUM (4)
u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
static void sec_update_iv(struct hisi_sec_req *req, u8 *iv)
{
// todo: update iv by cbc/ctr mode
}
static void sec_cipher_cb(struct hisi_qp *qp, void *);
static void sec_sg_unmap(struct device *dev,
struct skcipher_request *sk_req,
struct hisi_sec_cipher_req *creq,
struct dma_pool *pool)
{
if (sk_req->dst != sk_req->src)
acc_sg_buf_unmap(dev, sk_req->dst,
creq->c_out, creq->c_out_dma, pool);
acc_sg_buf_unmap(dev, sk_req->src, creq->c_in, creq->c_in_dma, pool);
}
static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
struct hisi_sec_qp_ctx *qp_ctx)
{
struct hisi_sec_ctx *ctx = req->ctx;
int req_id;
unsigned long flags;
spin_lock_irqsave(&qp_ctx->req_lock, flags);
req_id = find_first_zero_bit(qp_ctx->req_bitmap, ctx->req_limit);
if (req_id >= ctx->req_limit) {
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
dump_data((uint8_t *)qp_ctx->req_bitmap, ctx->req_limit / 8);
if (req_id >= ctx->req_limit || req_id < 0) {
dev_err(ctx->sec_dev, "no free req id\n");
return -ENOBUFS;
}
set_bit(req_id, qp_ctx->req_bitmap);
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
qp_ctx->req_list[req_id] = req;
req->req_id = req_id;
......@@ -177,6 +226,60 @@ static void hisi_sec_free_req_id(struct hisi_sec_req *req)
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
}
static int sec_request_transfer(struct hisi_sec_ctx *, struct hisi_sec_req *);
static int sec_request_send(struct hisi_sec_ctx *, struct hisi_sec_req *);
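/*
 * Delayed-work handler for the fusion path: if a partially filled fusion
 * request has not reached fusion_limit within fusion_tmout_usec, transfer
 * and send it as-is instead of waiting for more requests.
 */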
void qp_ctx_work_delayed_process(struct work_struct *work)
{
struct hisi_sec_qp_ctx *qp_ctx;
struct hisi_sec_req *req;
struct hisi_sec_ctx *ctx;
struct delayed_work *dwork;
unsigned long flags;
int ret;
dwork = container_of(work, struct delayed_work, work);
qp_ctx = container_of(dwork, struct hisi_sec_qp_ctx, work);
spin_lock_irqsave(&qp_ctx->req_lock, flags);
req = qp_ctx->fusion_req;
if (req == NULL) {
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
return;
}
ctx = req->ctx;
if (ctx == NULL || req->fusion_num == ctx->fusion_limit) {
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
return;
}
qp_ctx->fusion_req = NULL;
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
ret = sec_request_transfer(ctx, req);
if (ret)
goto err_free_req;
ret = sec_request_send(ctx, req);
if (ret != -EBUSY && ret != -EINPROGRESS) {
dev_err(ctx->sec_dev, "[%s][%d] ret[%d]\n", __func__,
__LINE__, ret);
goto err_unmap_req;
}
return;
err_unmap_req:
ctx->req_op->buf_unmap(ctx, req);
err_free_req:
ctx->req_op->free(ctx, req);
hisi_sec_free_req_id(req);
atomic_dec(&ctx->thread_cnt);
}
static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
int qp_ctx_id, int alg_type, int req_type)
{
......@@ -192,9 +295,11 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
qp->req_type = req_type;
qp->qp_ctx = qp_ctx;
#ifdef SEC_ASYNC
qp->req_cb = sec_cipher_cb;
qp->req_cb = hisi_sec_req_cb;
#endif
qp_ctx->qp = qp;
qp_ctx->fusion_num = 0;
qp_ctx->fusion_req = NULL;
spin_lock_init(&qp_ctx->req_lock);
atomic_set(&qp_ctx->req_cnt, 0);
......@@ -212,12 +317,26 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
goto err_free_req_bitmap;
}
qp_ctx->sqe_list = kcalloc(ctx->fusion_limit,
sizeof(struct hisi_sec_sqe), GFP_KERNEL);
if (!qp_ctx->sqe_list) {
ret = -ENOMEM;
goto err_free_req_list;
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0)
goto err_free_req_list;
goto err_free_sqe_list;
if (ctx->fusion_limit > 1 && ctx->fusion_tmout_usec) {
INIT_DELAYED_WORK(&qp_ctx->work, qp_ctx_work_delayed_process);
qp_ctx->work_cnt = 0;
}
return 0;
err_free_sqe_list:
kfree(qp_ctx->sqe_list);
err_free_req_list:
kfree(qp_ctx->req_list);
err_free_req_bitmap:
......@@ -232,6 +351,7 @@ static void hisi_sec_release_qp_ctx(struct hisi_sec_qp_ctx *qp_ctx)
hisi_qm_stop_qp(qp_ctx->qp);
kfree(qp_ctx->req_bitmap);
kfree(qp_ctx->req_list);
kfree(qp_ctx->sqe_list);
hisi_qm_release_qp(qp_ctx->qp);
}
......@@ -243,8 +363,8 @@ static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
ctx->req_limit = qlen;
ctx->req_fake_limit = qlen / 2;
atomic_set(&ctx->thread_cnt, 0);
ctx->max_thread_cnt = 0;
atomic_set(&ctx->q_id, 0);
atomic_set(&ctx->enc_qid, 0);
atomic_set(&ctx->dec_qid, ctx->enc_q_num);
return 0;
}
......@@ -270,10 +390,18 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
ctx->sec_dev = &qm->pdev->dev;
ctx->q_num = sec->ctx_q_num;
ctx->enc_q_num = ctx->q_num / 2;
ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx)
if (!ctx->qp_ctx) {
dev_err(ctx->sec_dev, "failed to alloc qp_ctx");
return -ENOMEM;
}
ctx->fusion_tmout_usec = sec->fusion_tmout_usec;
ctx->fusion_limit = sec->fusion_limit;
ctx->fusion_cnt = 0;
ctx->is_fusion = 0;
for (i = 0; i < ctx->q_num; i++) {
ret = hisi_sec_create_qp_ctx(qm, ctx, i, 0, 0);
......@@ -293,8 +421,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
return __hisi_sec_ctx_init(ctx, QM_Q_DEPTH);
err_sec_release_qp_ctx:
i = i - 1;
for (; i >= 0; i--)
for (i = i - 1; i >= 0; i--)
hisi_sec_release_qp_ctx(&ctx->qp_ctx[i]);
kfree(ctx->qp_ctx);
......@@ -321,175 +448,499 @@ static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
kfree(ctx->qp_ctx);
}
static int sec_alloc_cipher_req(struct hisi_sec_req *req)
static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
{
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
struct device *sec_dev = req->ctx->sec_dev;
struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
u32 req_id;
struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct hisi_sec_req *req;
struct hisi_sec_dfx *dfx;
if (sec_sqe->type == 1) {
req_id = sec_sqe->type1.tag;
req = qp_ctx->req_list[req_id];
req->err_type = sec_sqe->type1.error_type;
if (req->err_type || sec_sqe->type1.done != 0x1 ||
sec_sqe->type1.flag != 0x2) {
pr_err("err_type[%d] done[%d] flag[%d]\n",
req->err_type, sec_sqe->type1.done,
sec_sqe->type1.flag);
}
} else if (sec_sqe->type == 2) {
req_id = sec_sqe->type2.tag;
req = qp_ctx->req_list[req_id];
req->err_type = sec_sqe->type2.error_type;
if (req->err_type || sec_sqe->type2.done != 0x1 ||
sec_sqe->type2.flag != 0x2) {
pr_err("err_type[%d] done[%d] flag[%d]\n",
req->err_type, sec_sqe->type2.done,
sec_sqe->type2.flag);
}
} else {
pr_err("err bd type [%d]\n", sec_sqe->type);
return;
}
c_req->c_ivin = dma_alloc_coherent(sec_dev, SEC_IV_SIZE,
&c_req->c_ivin_dma, GFP_KERNEL);
if (!c_req->c_ivin)
return -ENOMEM;
dfx = &req->ctx->sec->sec_dfx;
sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
sec_update_iv(req, req->c_req.sk_req->iv);
return 0;
req->ctx->req_op->buf_unmap(req->ctx, req);
req->ctx->req_op->callback(req->ctx, req);
__sync_add_and_fetch(&dfx->recv_cnt, 1);
}
static int sec_free_cipher_req(struct hisi_sec_req *req)
static int sec_des_weak_key(const u64 *key, const u32 keylen)
{
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *sec_dev = req->ctx->sec_dev;
int i;
if (c_req->c_ivin) {
dma_free_coherent(sec_dev, SEC_IV_SIZE,
c_req->c_ivin, c_req->c_ivin_dma);
c_req->c_ivin = NULL;
}
for (i = 0; i < DES_WEAK_KEY_NUM; i++)
if (*key == des_weak_key[i])
return 1;
return 0;
}
static void sec_cipher_cb(struct hisi_qp *qp, void *resp)
static int sec_skcipher_des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
const u32 keylen, const u8 *key)
{
struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
u32 req_id = sec_sqe->type2.tag;
struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx;
struct dma_pool *pool;
struct hisi_sec_req *req;
int ret = 0;
if (keylen != DES_KEY_SIZE)
return -EINVAL;
req = qp_ctx->req_list[req_id];
pool = req->ctx->sec->sgl_pool;
if (sec_des_weak_key((const u64 *)key, keylen))
return -EKEYREJECTED;
if (sec_sqe->type2.done != 0x1 || sec_sqe->type2.flag != 0x2) {
ret = sec_sqe->type2.error_type;
dump_sec_bd((uint32_t *)sec_sqe);
dump_data((unsigned char *)sec_sqe,
sizeof(struct hisi_sec_sqe));
}
c_ctx->c_key_len = CKEY_LEN_DES;
sec_update_iv(req, req->sk_req->iv);
sec_sg_unmap(&qp->qm->pdev->dev, req->sk_req, &req->c_req, pool);
sec_free_cipher_req(req);
return 0;
}
hisi_sec_free_req_id(req);
static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
const u32 keylen, const enum C_MODE c_mode)
{
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
c_ctx->c_key_len = CKEY_LEN_3DES_2KEY;
break;
case SEC_DES3_3KEY_SIZE:
c_ctx->c_key_len = CKEY_LEN_3DES_3KEY;
break;
default:
return -EINVAL;
}
return 0;
}
if (req->fake_busy) {
req->sk_req->base.complete(&req->sk_req->base, -EINPROGRESS);
req->fake_busy = 0;
static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
const u32 keylen, const enum C_MODE c_mode)
{
if (c_mode == C_MODE_XTS) {
switch (keylen) {
case SEC_XTS_MIN_KEY_SIZE:
c_ctx->c_key_len = CKEY_LEN_128_BIT;
break;
case SEC_XTS_MAX_KEY_SIZE:
c_ctx->c_key_len = CKEY_LEN_256_BIT;
break;
default:
return -EINVAL;
}
} else {
switch (keylen) {
case AES_KEYSIZE_128:
c_ctx->c_key_len = CKEY_LEN_128_BIT;
break;
case AES_KEYSIZE_192:
c_ctx->c_key_len = CKEY_LEN_192_BIT;
break;
case AES_KEYSIZE_256:
c_ctx->c_key_len = CKEY_LEN_256_BIT;
break;
default:
return -EINVAL;
}
}
req->sk_req->base.complete(&req->sk_req->base, ret);
return 0;
}
static int sec_skcipher_setkey(struct hisi_sec_ctx *sec_ctx,
const u8 *key, u32 keylen)
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
const u32 keylen, const enum C_ALG c_alg, const enum C_MODE c_mode)
{
struct hisi_sec_cipher_ctx *c_ctx = &sec_ctx->c_ctx;
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
int ret;
switch (keylen) {
case AES_KEYSIZE_128:
c_ctx->c_key_len = 0;
if (c_mode == C_MODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
}
c_ctx->c_alg = c_alg;
c_ctx->c_mode = c_mode;
switch (c_alg) {
case C_ALG_DES:
ret = sec_skcipher_des_setkey(c_ctx, keylen, key);
break;
case AES_KEYSIZE_192:
c_ctx->c_key_len = 1;
case C_ALG_3DES:
ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
break;
case AES_KEYSIZE_256:
c_ctx->c_key_len = 2;
case C_ALG_AES:
case C_ALG_SM4:
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
return -EINVAL;
}
if (ret)
return ret;
memcpy(c_ctx->c_key, key, keylen);
return 0;
}
static int sec_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
u32 keylen)\
{ \
return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
}
GEN_SEC_SETKEY_FUNC(aes_ecb, C_ALG_AES, C_MODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, C_ALG_AES, C_MODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_ctr, C_ALG_AES, C_MODE_CTR)
GEN_SEC_SETKEY_FUNC(sm4_cbc, C_ALG_SM4, C_MODE_CBC)
GEN_SEC_SETKEY_FUNC(des_ecb, C_ALG_DES, C_MODE_ECB)
GEN_SEC_SETKEY_FUNC(des_cbc, C_ALG_DES, C_MODE_CBC)
GEN_SEC_SETKEY_FUNC(3des_ecb, C_ALG_3DES, C_MODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, C_ALG_3DES, C_MODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, C_ALG_AES, C_MODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_xts, C_ALG_SM4, C_MODE_XTS)
#ifdef USE_DM_CRYPT_OPTIMIZE
static int sec_setkey_plain64_sm4_xts(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct geniv_key_info *info = (struct geniv_key_info *)key;
int ret;
keylen = info->key_size;
key = info->key;
ctx->c_ctx.iv_offset = info->iv_offset;
ctx->c_ctx.c_gran_size = info->sector_size;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
return sec_skcipher_setkey(ctx, key, keylen, C_ALG_SM4, C_MODE_XTS);
}
#endif
memcpy(ctx->c_ctx.c_key, key, keylen);
ctx->c_ctx.c_mode = ECB;
ctx->c_ctx.c_alg = AES;
static int hisi_sec_get_async_ret(int ret, int req_cnt, int req_fake_limit)
{
if (ret == 0) {
if (req_cnt >= req_fake_limit)
ret = -EBUSY;
else
ret = -EINPROGRESS;
} else {
if (ret == -EBUSY)
ret = -ENOBUFS;
}
return sec_skcipher_setkey(ctx, key, keylen);
return ret;
}
static int sec_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
static int hisi_sec_skcipher_alloc(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *sec_dev = ctx->sec_dev;
c_req->c_ivin = dma_alloc_coherent(sec_dev,
SEC_IV_SIZE * ctx->fusion_limit, &c_req->c_ivin_dma,
GFP_ATOMIC);
memcpy(ctx->c_ctx.c_key, key, keylen);
ctx->c_ctx.c_mode = CBC;
ctx->c_ctx.c_alg = AES;
if (!c_req->c_ivin)
return -ENOMEM;
return sec_skcipher_setkey(ctx, key, keylen);
req->priv = kcalloc(ctx->fusion_limit, sizeof(void *),
GFP_ATOMIC);
if (!req->priv) {
dma_free_coherent(sec_dev, SEC_IV_SIZE * ctx->fusion_limit,
c_req->c_ivin, c_req->c_ivin_dma);
return -ENOMEM;
}
return 0;
}
static int sec_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
static int hisi_sec_skcipher_free(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *sec_dev = req->ctx->sec_dev;
kfree(req->priv);
dma_free_coherent(sec_dev, SEC_IV_SIZE * ctx->fusion_limit,
c_req->c_ivin, c_req->c_ivin_dma);
return 0;
}
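/*
 * Map source/destination scatterlists to hardware SGLs. For a fused
 * request the per-request src/dst scatterlists are first concatenated
 * into one temporary scatterlist so that a single hardware SGL covers
 * the whole batch.
 */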
static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *dev = ctx->sec_dev;
struct dma_pool *pool = ctx->sec->sgl_pool;
struct skcipher_request *sk_req =
(struct skcipher_request *)req->priv[0];
struct skcipher_request *sk_next;
int i, ret = 0;
c_req->src = sk_req->src;
c_req->dst = sk_req->dst;
if (ctx->is_fusion && req->fusion_num > 1) {
int src_nents, copyed_src_nents = 0, src_nents_sum = 0;
int dst_nents, copyed_dst_nents = 0, dst_nents_sum = 0;
int sg_size = sizeof(struct scatterlist);
for (i = 0; i < req->fusion_num; i++) {
sk_next = (struct skcipher_request *)req->priv[i];
if (sk_next == NULL) {
dev_err(ctx->sec_dev, "nullptr at [%d]\n", i);
return -EFAULT;
}
src_nents_sum += sg_nents(sk_next->src);
dst_nents_sum += sg_nents(sk_next->dst);
if (sk_next->src == sk_next->dst) {
dev_err(ctx->sec_dev, "err: src == dst\n");
return -EFAULT;
}
}
c_req->src = kcalloc(src_nents_sum, sg_size, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(c_req->src))
return -ENOMEM;
c_req->dst = kcalloc(dst_nents_sum, sg_size, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(c_req->dst))
return -ENOMEM;
for (i = 0; i < req->fusion_num; i++) {
sk_next = (struct skcipher_request *)req->priv[i];
src_nents = sg_nents(sk_next->src);
dst_nents = sg_nents(sk_next->dst);
if (i != req->fusion_num - 1) {
sg_unmark_end(&sk_next->src[src_nents - 1]);
sg_unmark_end(&sk_next->dst[dst_nents - 1]);
}
memcpy(c_req->src + copyed_src_nents, sk_next->src,
src_nents * sg_size);
memcpy(c_req->dst + copyed_dst_nents, sk_next->dst,
dst_nents * sg_size);
copyed_src_nents += src_nents;
copyed_dst_nents += dst_nents;
}
/* ensure copy of sg already done */
mb();
}
c_req->c_in = acc_sg_buf_map_to_hw_sgl(dev, c_req->src, pool,
&c_req->c_in_dma);
if (IS_ERR(c_req->c_in)) {
ret = PTR_ERR(c_req->c_in);
goto err_free_sg_table;
}
if (c_req->dst == c_req->src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
} else {
c_req->c_out = acc_sg_buf_map_to_hw_sgl(dev, c_req->dst, pool,
&c_req->c_out_dma);
if (IS_ERR(c_req->c_out)) {
ret = PTR_ERR(c_req->c_out);
goto err_unmap_src;
}
}
memcpy(ctx->c_ctx.c_key, key, keylen);
return 0;
ctx->c_ctx.c_mode = CTR;
ctx->c_ctx.c_alg = AES;
err_unmap_src:
acc_sg_buf_unmap(dev, c_req->src, c_req->c_in, c_req->c_in_dma, pool);
err_free_sg_table:
if (ctx->is_fusion && req->fusion_num > 1) {
kfree(c_req->src);
kfree(c_req->dst);
}
return sec_skcipher_setkey(ctx, key, keylen);
return ret;
}
static int sec_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret = 0;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *dev = ctx->sec_dev;
struct dma_pool *pool = ctx->sec->sgl_pool;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
if (c_req->dst != c_req->src)
acc_sg_buf_unmap(dev, c_req->dst, c_req->c_out,
c_req->c_out_dma, pool);
memcpy(ctx->c_ctx.c_key, key, keylen);
acc_sg_buf_unmap(dev, c_req->src, c_req->c_in, c_req->c_in_dma, pool);
ctx->c_ctx.c_mode = XTS;
ctx->c_ctx.c_alg = AES;
if (ctx->is_fusion && req->fusion_num > 1) {
kfree(c_req->src);
kfree(c_req->dst);
}
return sec_skcipher_setkey(ctx, key, keylen / 2);
return 0;
}
static int sec_skcipher_setkey_sm4_xts(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret = 0;
struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct skcipher_request *sk_req =
(struct skcipher_request *)req->priv[0];
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
struct skcipher_request *sk_next;
int i, iv_size;
ret = xts_verify_key(tfm, key, keylen);
c_req->c_len = sk_req->cryptlen;
iv_size = crypto_skcipher_ivsize(atfm);
if (iv_size > SEC_IV_SIZE)
return -EINVAL;
memcpy(c_req->c_ivin, sk_req->iv, iv_size);
if (ctx->is_fusion) {
for (i = 1; i < req->fusion_num; i++) {
sk_next = (struct skcipher_request *)req->priv[i];
memcpy(c_req->c_ivin + i * iv_size, sk_next->iv,
iv_size);
}
c_req->gran_num = req->fusion_num;
c_ctx->c_gran_size = sk_req->cryptlen;
}
return 0;
}
static int hisi_sec_skcipher_copy_iv_dmcrypt(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct skcipher_request *sk_req =
(struct skcipher_request *)req->priv[0];
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct geniv_req_info *info = (struct geniv_req_info *)(sk_req->iv);
c_req->lba = info->cc_sector + ctx->c_ctx.iv_offset;
c_req->gran_num = sk_req->cryptlen / ctx->c_ctx.c_gran_size;
return 0;
}
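/*
 * Fill a type-1 (storage scene) BD: data is processed as gran_num
 * granules of c_gran_size bytes, with the LBA and block size fields
 * describing the storage layout.
 */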
static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
if (!c_req->c_len)
return -EINVAL;
sec_sqe->type1.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
sec_sqe->type1.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
sec_sqe->type1.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
sec_sqe->type1.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
sec_sqe->type1.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
sec_sqe->type1.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
sec_sqe->type1.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
sec_sqe->type1.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
sec_sqe->type1.c_mode = c_ctx->c_mode;
sec_sqe->type1.c_alg = c_ctx->c_alg;
sec_sqe->type1.c_key_len = c_ctx->c_key_len;
sec_sqe->src_addr_type = 1;
sec_sqe->dst_addr_type = 1;
sec_sqe->type = 1;
sec_sqe->scene = 5;
sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
if (c_req->encrypt == 1)
sec_sqe->cipher = 1;
else
sec_sqe->cipher = 2;
if (c_ctx->c_mode == C_MODE_XTS)
sec_sqe->type1.ci_gen = 0x3;
sec_sqe->type1.cipher_gran_size = c_ctx->c_gran_size;
sec_sqe->type1.gran_num = c_req->gran_num;
__sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, c_req->gran_num);
sec_sqe->type1.block_size = 512;
sec_sqe->type1.lba_l = lower_32_bits(c_req->lba);
sec_sqe->type1.lba_h = upper_32_bits(c_req->lba);
sec_sqe->type1.tag = req->req_id;
return 0;
}
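/*
 * BD fill for the IV-fusion path: reuses the type-1 storage layout but
 * clears ci_gen, so that the per-request IVs placed in c_ivin by
 * hisi_sec_skcipher_copy_iv() are applied per granule.
 */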
static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
int ret;
ret = hisi_sec_skcipher_bd_fill_storage(ctx, req);
if (ret)
return ret;
memcpy(ctx->c_ctx.c_key, key, keylen);
req->sec_sqe.type1.ci_gen = 0x0;
ctx->c_ctx.c_mode = XTS;
ctx->c_ctx.c_alg = SM4;
return sec_skcipher_setkey(ctx, key, keylen / 2);
return 0;
}
static int sec_cipher_fill_sqe(struct hisi_sec_sqe *sec_sqe,
struct hisi_sec_ctx *ctx, struct hisi_sec_cipher_req *c_req)
static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
if (!c_req->c_len)
return -EINVAL;
sec_sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
sec_sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
sec_sqe->type2.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
sec_sqe->type2.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
......@@ -503,7 +954,9 @@ static int sec_cipher_fill_sqe(struct hisi_sec_sqe *sec_sqe,
sec_sqe->dst_addr_type = 1;
sec_sqe->type = 2;
sec_sqe->scene = 1;
sec_sqe->de = 1;
sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
__sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, 1);
if (c_req->encrypt == 1)
sec_sqe->cipher = 1;
......@@ -511,260 +964,429 @@ static int sec_cipher_fill_sqe(struct hisi_sec_sqe *sec_sqe,
sec_sqe->cipher = 2;
sec_sqe->type2.c_len = c_req->c_len;
sec_sqe->type2.tag = req->req_id;
return 0;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req,
bool encrypt)
static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
int ret = 0;
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
struct device *dev = ctx->sec_dev;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct dma_pool *pool = ctx->sec->sgl_pool;
struct hisi_sec_qp_ctx *qp_ctx;
struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
unsigned long flags;
int req_cnt;
int thread_cnt;
int issue_id;
int req_cnt = req->req_cnt;
int ret;
if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
return -EINVAL;
spin_lock_irqsave(&qp_ctx->req_lock, flags);
ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
__sync_add_and_fetch(&ctx->sec->sec_dfx.send_cnt, 1);
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
thread_cnt = atomic_inc_return(&ctx->thread_cnt);
if (thread_cnt > ctx->max_thread_cnt)
ctx->max_thread_cnt = thread_cnt;
return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit);
}
req->sk_req = sk_req;
req->ctx = ctx;
static int hisi_sec_skcipher_complete(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req, int err_code)
{
struct skcipher_request **sk_reqs =
(struct skcipher_request **)req->priv;
int i, req_fusion_num;
memset(sec_sqe, 0, sizeof(struct hisi_sec_sqe));
if (ctx->is_fusion == SEC_NO_FUSION)
req_fusion_num = 1;
else
req_fusion_num = req->fusion_num;
ret = sec_alloc_cipher_req(req);
if (ret) {
dev_err(dev, "sec alloc cipher request failed\n");
atomic_dec(&ctx->thread_cnt);
return ret;
}
/* ensure data already writeback */
mb();
c_req->c_in = acc_sg_buf_map_to_hw_sgl(dev, sk_req->src, pool,
&c_req->c_in_dma);
if (IS_ERR(c_req->c_in)) {
ret = PTR_ERR(c_req->c_in);
goto err_free_cipher_req;
}
for (i = 0; i < req_fusion_num; i++)
sk_reqs[i]->base.complete(&sk_reqs[i]->base, err_code);
if (sk_req->dst == sk_req->src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
/* free sk_reqs if this request is completed */
if (err_code != -EINPROGRESS) {
__sync_add_and_fetch(&ctx->sec->sec_dfx.put_task_cnt,
req_fusion_num);
kfree(sk_reqs);
} else {
c_req->c_out = acc_sg_buf_map_to_hw_sgl(dev, sk_req->dst, pool,
&c_req->c_out_dma);
if (IS_ERR(c_req->c_out)) {
ret = PTR_ERR(c_req->c_out);
goto err_unmap_src_sg;
}
__sync_add_and_fetch(&ctx->sec->sec_dfx.busy_comp_cnt,
req_fusion_num);
}
c_req->c_len = sk_req->cryptlen;
c_req->encrypt = encrypt;
return 0;
}
ret = sec_cipher_fill_sqe(sec_sqe, ctx, c_req);
if (ret) {
dev_err(dev, "sec cipher fill sqe failed\n");
goto err_unmap_dst_sg;
}
static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *sec_dev = req->ctx->sec_dev;
dma_free_coherent(sec_dev, SEC_IV_SIZE * ctx->fusion_limit,
c_req->c_ivin, c_req->c_ivin_dma);
hisi_sec_free_req_id(req);
if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
hisi_sec_skcipher_complete(ctx, req, -EINPROGRESS);
hisi_sec_skcipher_complete(ctx, req, req->err_type);
return 0;
}
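/*
 * Round-robin queue selection: encrypt requests use queues
 * [0, enc_q_num) and decrypt requests use [enc_q_num, q_num).
 */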
static int sec_get_issue_id_range(atomic_t *qid, int start, int end)
{
int issue_id;
int issue_len = end - start;
issue_id = (atomic_inc_return(qid) - start) % issue_len + start;
if (issue_id % issue_len == 0 && atomic_read(qid) > issue_len)
atomic_sub(issue_len, qid);
return issue_id;
}
static inline int sec_get_issue_id(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
int issue_id;
if (req->c_req.encrypt == 1)
issue_id = sec_get_issue_id_range(&ctx->enc_qid, 0,
ctx->enc_q_num);
else
issue_id = sec_get_issue_id_range(&ctx->dec_qid, ctx->enc_q_num,
ctx->q_num);
return issue_id;
}
static inline int hisi_sec_inc_thread_cnt(struct hisi_sec_ctx *ctx)
{
int thread_cnt;
if (!crypto_skcipher_ivsize(atfm)) {
ret = -EINVAL;
goto err_unmap_dst_sg;
} else
memcpy(c_req->c_ivin, sk_req->iv, crypto_skcipher_ivsize(atfm));
thread_cnt = atomic_inc_return(&ctx->thread_cnt);
if (thread_cnt > ctx->sec->sec_dfx.thread_cnt)
ctx->sec->sec_dfx.thread_cnt = thread_cnt;
return 0;
}
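/*
 * Pick a queue and either join its pending fusion request (append the
 * incoming skcipher request to priv[]) or become a new fusion head:
 * allocate a request id and per-batch resources and arm the flush timer.
 * *fusion_send is set when the batch must be sent immediately (in-place
 * requests, or the batch reached fusion_limit).
 */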
static struct hisi_sec_req *sec_request_alloc(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *in_req, int *fusion_send, int *fake_busy)
{
struct hisi_sec_qp_ctx *qp_ctx;
struct hisi_sec_req *req;
unsigned long flags;
int issue_id, ret;
/* get issue_id */
issue_id = atomic_fetch_inc(&ctx->q_id) % ctx->q_num;
if (issue_id % ctx->q_num == 0 && ctx->q_id.counter > ctx->q_num)
atomic_sub(ctx->q_num, &ctx->q_id);
__sync_add_and_fetch(&ctx->sec->sec_dfx.get_task_cnt, 1);
issue_id = sec_get_issue_id(ctx, in_req);
qp_ctx = &ctx->qp_ctx[issue_id];
#ifdef SEC_ASYNC
ret = hisi_sec_alloc_req_id(req, qp_ctx);
if (ret) {
dev_err(dev, "sec alloc req id failed\n");
goto err_unmap_dst_sg;
spin_lock_irqsave(&qp_ctx->req_lock, flags);
if (in_req->c_req.sk_req->src == in_req->c_req.sk_req->dst) {
*fusion_send = 1;
} else if (qp_ctx->fusion_req &&
qp_ctx->fusion_req->fusion_num < ctx->fusion_limit) {
req = qp_ctx->fusion_req;
*fake_busy = req->fake_busy;
__sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt,
fake_busy);
req->priv[req->fusion_num] = in_req->c_req.sk_req;
req->fusion_num++;
in_req->fusion_num = req->fusion_num;
if (req->fusion_num == ctx->fusion_limit) {
*fusion_send = 1;
qp_ctx->fusion_req = NULL;
}
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
return req;
}
req = in_req;
hisi_sec_inc_thread_cnt(ctx);
if (hisi_sec_alloc_req_id(req, qp_ctx)) {
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
return NULL;
}
req->fake_busy = 0;
req_cnt = atomic_inc_return(&qp_ctx->req_cnt);
if (req_cnt >= ctx->req_fake_limit)
req->req_cnt = atomic_inc_return(&qp_ctx->req_cnt);
if (req->req_cnt >= ctx->req_fake_limit) {
req->fake_busy = 1;
*fake_busy = 1;
__sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt, 1);
}
sec_sqe->type2.tag = req->req_id;
#endif
spin_lock_irqsave(&qp_ctx->req_lock, flags);
ret = hisi_qp_send(qp_ctx->qp, sec_sqe);
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
if (ret < 0) {
#ifdef SEC_ASYNC
if (ret == -EBUSY)
ret = -ENOBUFS;
ret = ctx->req_op->alloc(ctx, req);
if (ret) {
dev_err(ctx->sec_dev, "req_op alloc failed\n");
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
goto err_free_req_id;
#else
goto err_unmap_dst_sg;
#endif
}
#ifdef SEC_ASYNC
if (req_cnt >= ctx->req_fake_limit)
ret = -EBUSY;
else
ret = -EINPROGRESS;
#else
ret = hisi_qp_wait(qp_ctx->qp);
if (ret < 0)
goto err_unmap_dst_sg;
if (ctx->is_fusion && *fusion_send == 0)
qp_ctx->fusion_req = req;
sec_update_iv(req, sk_req->iv);
sec_sg_unmap(dev, sk_req, c_req, pool);
req->fusion_num = 1;
sec_free_cipher_req(req);
#endif
req->priv[0] = in_req->c_req.sk_req;
spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
atomic_dec(&ctx->thread_cnt);
return ret;
if (ctx->is_fusion && *fusion_send == 0) {
if (ctx->sec->qm.wq)
queue_delayed_work(ctx->sec->qm.wq, &qp_ctx->work,
nsecs_to_jiffies(ctx->fusion_tmout_usec));
else
schedule_delayed_work(&qp_ctx->work,
nsecs_to_jiffies(ctx->fusion_tmout_usec));
}
return req;
#ifdef SEC_ASYNC
err_free_req_id:
hisi_sec_free_req_id(req);
#endif
err_unmap_dst_sg:
if (sk_req->dst != sk_req->src)
acc_sg_buf_unmap(dev, sk_req->dst,
c_req->c_out, c_req->c_out_dma, pool);
err_unmap_src_sg:
acc_sg_buf_unmap(dev, sk_req->src,
c_req->c_in, c_req->c_in_dma, pool);
err_free_cipher_req:
sec_free_cipher_req(req);
return NULL;
}
atomic_dec(&ctx->thread_cnt);
static int sec_request_transfer(struct hisi_sec_ctx *ctx,
struct hisi_sec_req *req)
{
int ret;
ret = ctx->req_op->buf_map(ctx, req);
if (ret)
return ret;
ret = ctx->req_op->do_transfer(ctx, req);
if (ret)
goto unmap_req_buf;
memset(&req->sec_sqe, 0, sizeof(struct hisi_sec_sqe));
ret = ctx->req_op->bd_fill(ctx, req);
if (ret)
goto unmap_req_buf;
return 0;
unmap_req_buf:
ctx->req_op->buf_unmap(ctx, req);
return ret;
}
static int sec_skcipher_encrypt(struct skcipher_request *req)
static int sec_request_send(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req)
{
return sec_skcipher_crypto(req, true);
int ret;
ret = ctx->req_op->bd_send(ctx, req);
if (ret == 0 || ret == -EBUSY || ret == -EINPROGRESS)
atomic_dec(&ctx->thread_cnt);
return ret;
}
static int sec_skcipher_decrypt(struct skcipher_request *req)
static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
{
return sec_skcipher_crypto(req, false);
struct hisi_sec_req *req;
int ret, fusion_send = 0, fake_busy = 0;
in_req->fusion_num = 1;
req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
if (!req) {
dev_err(ctx->sec_dev, "sec_request_alloc failed\n");
return -ENOMEM;
}
if (ctx->is_fusion && fusion_send == 0)
return fake_busy ? -EBUSY : -EINPROGRESS;
ret = sec_request_transfer(ctx, req);
if (ret) {
dev_err(ctx->sec_dev, "sec_transfer failed! ret[%d]\n", ret);
goto err_free_req;
}
ret = sec_request_send(ctx, req);
if (ret != -EBUSY && ret != -EINPROGRESS) {
dev_err(ctx->sec_dev, "sec_send failed ret[%d]\n", ret);
goto err_unmap_req;
}
return ret;
err_unmap_req:
ctx->req_op->buf_unmap(ctx, req);
err_free_req:
ctx->req_op->free(ctx, req);
hisi_sec_free_req_id(req);
atomic_dec(&ctx->thread_cnt);
return ret;
}
static struct skcipher_alg sec_algs[] = {
struct hisi_sec_req_op sec_req_ops_tbl[] = {
{
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "hisi_sec_aes_ecb",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_ecb,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}, {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "hisi_sec_aes_cbc",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_cbc,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.fusion_type = SEC_NO_FUSION,
.alloc = hisi_sec_skcipher_alloc,
.free = hisi_sec_skcipher_free,
.buf_map = hisi_sec_skcipher_buf_map,
.buf_unmap = hisi_sec_skcipher_buf_unmap,
.do_transfer = hisi_sec_skcipher_copy_iv,
.bd_fill = hisi_sec_skcipher_bd_fill_base,
.bd_send = hisi_sec_bd_send_asyn,
.callback = hisi_sec_skcipher_callback,
}, {
.base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "hisi_sec_aes_ctr",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_ctr,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.fusion_type = SEC_NO_FUSION,
.alloc = hisi_sec_skcipher_alloc,
.free = hisi_sec_skcipher_free,
.buf_map = hisi_sec_skcipher_buf_map,
.buf_unmap = hisi_sec_skcipher_buf_unmap,
.do_transfer = hisi_sec_skcipher_copy_iv_dmcrypt,
.bd_fill = hisi_sec_skcipher_bd_fill_storage,
.bd_send = hisi_sec_bd_send_asyn,
.callback = hisi_sec_skcipher_callback,
}, {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "hisi_sec_aes_xts",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_xts,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
}, {
.base = {
.cra_name = "xts(sm4)",
.cra_driver_name = "hisi_sec_sm4_xts",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_sm4_xts,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
.fusion_type = SEC_IV_FUSION,
.alloc = hisi_sec_skcipher_alloc,
.free = hisi_sec_skcipher_free,
.buf_map = hisi_sec_skcipher_buf_map,
.buf_unmap = hisi_sec_skcipher_buf_unmap,
.do_transfer = hisi_sec_skcipher_copy_iv,
.bd_fill = hisi_sec_skcipher_bd_fill_multi_iv,
.bd_send = hisi_sec_bd_send_asyn,
.callback = hisi_sec_skcipher_callback,
}
};
static int sec_skcipher_crypto(struct skcipher_request *sk_req,
bool encrypt, enum SEC_REQ_OPS_TYPE req_ops_type)
{
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
return -EINVAL;
req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
req->ctx = ctx;
ctx->req_op = &sec_req_ops_tbl[req_ops_type];
ctx->is_fusion = ctx->req_op->fusion_type;
return sec_io_proc(ctx, req);
}
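/*
 * Generate encrypt/decrypt entry points bound to one entry of
 * sec_req_ops_tbl[].
 */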
#define SEC_SKCIPHER_GEN_CRYPT(suffix, encrypt, fusion_type) \
static int sec_skcipher_##suffix(struct skcipher_request *req) \
{ \
return sec_skcipher_crypto(req, encrypt, fusion_type); \
}
SEC_SKCIPHER_GEN_CRYPT(alg_encrypt, true, SEC_OPS_SKCIPHER_ALG)
SEC_SKCIPHER_GEN_CRYPT(alg_decrypt, false, SEC_OPS_SKCIPHER_ALG)
#ifdef USE_DM_CRYPT_OPTIMIZE
SEC_SKCIPHER_GEN_CRYPT(dm_encrypt, true, SEC_OPS_DMCRYPT)
SEC_SKCIPHER_GEN_CRYPT(dm_decrypt, false, SEC_OPS_DMCRYPT)
#endif
SEC_SKCIPHER_GEN_CRYPT(fusion_encrypt, true, SEC_OPS_MULTI_IV)
SEC_SKCIPHER_GEN_CRYPT(fusion_decrypt, false, SEC_OPS_MULTI_IV)
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
sec_max_key_size, sec_decrypt, sec_encrypt, blk_size, iv_size)\
{\
.base = {\
.cra_name = sec_cra_name,\
.cra_driver_name = "hisi_sec_"sec_cra_name,\
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC,\
.cra_blocksize = blk_size,\
.cra_ctxsize = sizeof(struct hisi_sec_ctx),\
.cra_alignmask = 0,\
.cra_module = THIS_MODULE,\
},\
.init = hisi_sec_cipher_ctx_init,\
.exit = hisi_sec_cipher_ctx_exit,\
.setkey = sec_set_key,\
.decrypt = sec_decrypt,\
.encrypt = sec_encrypt,\
.min_keysize = sec_min_key_size,\
.max_keysize = sec_max_key_size,\
.ivsize = iv_size,\
},
#define SEC_SKCIPHER_NORMAL_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_alg_decrypt, sec_skcipher_alg_encrypt, blk_size, iv_size)
#define SEC_SKCIPHER_DM_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_dm_decrypt, sec_skcipher_dm_encrypt, blk_size, iv_size)
#define SEC_SKCIPHER_FUSION_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_fusion_decrypt, sec_skcipher_fusion_encrypt, blk_size, \
iv_size)
static struct skcipher_alg sec_algs[] = {
SEC_SKCIPHER_NORMAL_ALG("ecb(aes)", sec_setkey_aes_ecb,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0)
SEC_SKCIPHER_NORMAL_ALG("cbc(aes)", sec_setkey_aes_cbc,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
SEC_SKCIPHER_NORMAL_ALG("ctr(aes)", sec_setkey_aes_ctr,
AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
SEC_SKCIPHER_NORMAL_ALG("xts(aes)", sec_setkey_aes_xts,
SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
SEC_SKCIPHER_NORMAL_ALG("ecb(des)", sec_setkey_des_ecb,
DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, 0)
SEC_SKCIPHER_NORMAL_ALG("cbc(des)", sec_setkey_des_cbc,
DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, DES_BLOCK_SIZE)
SEC_SKCIPHER_NORMAL_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0)
SEC_SKCIPHER_NORMAL_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
DES3_EDE_BLOCK_SIZE)
#ifndef SEC_FUSION_BD
SEC_SKCIPHER_NORMAL_ALG("xts(sm4)", sec_setkey_sm4_xts,
SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
SEC_SKCIPHER_NORMAL_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
#else
SEC_SKCIPHER_FUSION_ALG("xts(sm4)", sec_setkey_sm4_xts,
SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
SEC_SKCIPHER_FUSION_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
AES_BLOCK_SIZE)
#endif
#ifdef USE_DM_CRYPT_OPTIMIZE
SEC_SKCIPHER_DM_ALG("plain64(xts(sm4))", sec_setkey_plain64_sm4_xts,
sizeof(struct geniv_key_info), sizeof(struct geniv_key_info),
AES_BLOCK_SIZE, AES_BLOCK_SIZE)
#endif
};
int hisi_sec_register_to_crypto(void)
......
......@@ -12,7 +12,7 @@
#ifndef HISI_SEC_CRYPTO_H
#define HISI_SEC_CRYPTO_H
#define SEC_IV_SIZE 16
#define SEC_IV_SIZE 24
#define SEC_MAX_KEY_SIZE 64
int hisi_sec_register_to_crypto(void);
......
......@@ -26,239 +26,85 @@
#define HSEC_VF_NUM 63
#define HSEC_QUEUE_NUM_V1 4096
#define HSEC_QUEUE_NUM_V2 1024
#define PCI_DEVICE_ID_SEC_PF 0xa255
#define PCI_DEVICE_ID_SEC_VF 0xa256
#define PCI_DEVICE_ID_SEC_PF 0xa255
#define PCI_DEVICE_ID_SEC_VF 0xa256
#define HSEC_COMMON_REG_OFF 0x1000
#define HSEC_COMMON_REG_OFF 0x1000
#define HSEC_FSM_MAX_CNT 0x301008
#define HSEC_PORT_ARCA_CHE_0 0x301040
#define HSEC_PORT_ARCA_CHE_1 0x301044
#define HSEC_PORT_AWCA_CHE_0 0x301060
#define HSEC_PORT_AWCA_CHE_1 0x301064
#define HSEC_BD_RUSER_32_63 0x301110
#define HSEC_SGL_RUSER_32_63 0x30111c
#define HSEC_DATA_RUSER_32_63 0x301128
#define HSEC_DATA_WUSER_32_63 0x301134
#define HSEC_BD_WUSER_32_63 0x301140
#define HSEC_QM_IDEL_STATUS 0x3040e4
#define HSEC_MASTER_GLOBAL_CTRL 0x300000
#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
#define HSEC_MASTER_TRANS_RETURN 0x300150
#define MASTER_TRANS_RETURN_RW 0x3
#define HSEC_CORE_INT_SOURCE 0x301010
#define HSEC_CORE_INT_MASK 0x301000
#define HSEC_CORE_INT_MASK 0x301000
#define HSEC_CORE_INT_STATUS 0x301008
#define HSEC_CORE_INT_STATUS_M_ECC BIT(1)
#define HSEC_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HSEC_CORE_INT_STATUS_M_ECC BIT(2)
#define HSEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
#define SRAM_ECC_ERR_NUM_SHIFT 16
#define SRAM_ECC_ERR_ADDR_SHIFT 24
#define HSEC_CORE_INT_DISABLE 0x000001FF
#define SRAM_ECC_ERR_ADDR_SHIFT 0
#define HSEC_CORE_INT_DISABLE 0x0
#define HSEC_CORE_INT_ENABLE 0x1ff
#define HSEC_SM4_CTR_ENABLE_REG 0x301380
#define HSEC_SM4_CTR_ENABLE_MSK 0xEFFFFFFF
#define HSEC_SM4_CTR_DISABLE_MSK 0xFFFFFFFF
#define HSEC_SM4_CTR_ENABLE_REG 0X301380
#define HSEC_SM4_CTR_ENABLE_MSK 0XEFFFFFFF
#define HSEC_SM4_CTR_DISABLE_MSK 0XFFFFFFFF
#define HSEC_XTS_MIV_ENABLE_REG 0x301384
#define HSEC_XTS_MIV_ENABLE_MSK 0x7FFFFFFF
#define HSEC_XTS_MIV_DISABLE_MSK 0xFFFFFFFF
#define HSEC_SQE_SIZE 128
#define HSEC_SQ_SIZE (HSEC_SQE_SIZE * QM_Q_DEPTH)
#define HSEC_PF_DEF_Q_NUM 64
#define HSEC_PF_DEF_Q_BASE 0
#define HSEC_SOFT_CTRL_CNT_CLR_CE 0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HSEC_CTRL_CNT_CLR_CE 0x301120
#define HSEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define AM_CURR_ALL_RET_MASK GENMASK(1, 0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
#define SEC_ACC_COMMON_REG_OFF 0x1000
#define SC_SEC_ICG_EN_REG 0x390
#define SC_SEC_ICG_DIS_REG 0x394
#define SC_SEC_RESET_REQ_REG 0xA28
#define SC_SEC_RESET_DREQ_REG 0xA2C
#define SC_SEC_ICG_ST_REG 0x5390
#define SC_SEC_RESET_ST_REG 0x5A28
#define SEC_RESET_MASK GENMASK(1, 0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
#define SEC_ACC_COMMON_REG_OFF 0x1000
#define SEC_PF_ABNORMAL_INT_ENABLE_REG 0x000
#define SEC_PF_INT_MSK 0x1ff
#define SEC_PF_ABNORMAL_INT_STATUS_REG 0x0008
#define SEC_PF_ABNORMAL_INT_SOURCE_REG 0x0010
#define SEC_PF_ABNORMAL_INT_SET_REG 0x0018
#define SEC_RAS_CE_INT_COUNT_REG 0x0030
#define SEC_RAS_INT_WIDTH_PLUS_REG 0x0034
#define SEC_RAS_CE_ENABLE_REG 0x50
#define SEC_RAS_FE_ENABLE_REG 0x54
#define SEC_RAS_NFE_ENABLE_REG 0x58
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENABLE_REG 0x58
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
#define SEC_MEM_TIMING_REG 0x0108
#define SEC_ECC_ENABLE_REG 0x010c
#define SEC_CNT_CLR_CE_REG 0x0120
#define SEC_FSM_MAX_CNT_REG 0x0124
#define SEC_SGL_OFFSET_CONTROL_REG 0x0130
#define SEC_PAGE_SIZE_CONTROL_REG 0x0134
#define SEC_DIF_CRC_INIT_REG 0x0138
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
#define SEC_AXI_CACHE_CFG_REG 0x0210
#define SEC_AXI_CACHE_CFG_1_REG 0x0214
#define SEC_SNPATTR_CFG_REG 0x0218
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
#define SEC_BD_CS_PACKET_OST_CFG_REG 0x0240
#define SEC_DATA_OST_CFG_REG 0x0248
#define SEC_SAA_CLK_EN_REG 0x0260
#define SEC_SAA_EN_REG 0x0270
#define SEC_REQ_TRNG_TIME_TH_REG 0x0280
#define SEC_BD_ERR_CHK_EN_REG(n) (0x0380 + (n) * 0x04)
#define BD_LATENCY_MIN_REG 0x0600
#define BD_LATENCY_MAX_REG 0x0608
#define BD_LATENCY_AVG_REG 0x060C
#define BD_NUM_IN_SAA_0_REG 0x0670
#define BD_NUM_IN_SAA_1_REG 0x0674
#define BD_NUM_IN_SEC_REG 0x0680
#define SEC_PF_FSM_HBEAT_INFO_REG(n) (0x20 + (n) * 0x4)
#define SEC_FSM_USE_REG_NUM 2
#define SEC_BD_M_FSM_REG 0x700
#define SEC_KEY_FSM_REG 0x704
#define SEC_IV_FSM_REG 0x708
#define SEC_IV_KEY_FSM_REG 0x70c
#define SEC_CLU_ALG_FSM_REG 0x710
#define SEC_RD_SGE_FSM_REG 0x72c
#define SEC_RD_HAC_SGE_FSM_REG(n) (0x730 + (n) * 0x4)
#define SEC_AW_HAC_FSM_REG(n) (0x750 + (n) * 0x4)
#define SEC_SGE_CBB_NUM 3
#define SEC_DIF_SHAPE_REG(n) (0x760 + (n) * 0x4)
#define SEC_CHANNEL_NUM 9
#define SEC_BD_TOP_FSM_REG 0x7A0
#define SEC_ECC_1BIT_CNT_REG 0xC00
#define SEC_ECC_1BIT_INFO_REG 0xC04
#define SEC_ECC_2BIT_CNT_REG 0xC10
#define SEC_ECC_2BIT_INFO_REG 0xC14
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define BD_CIPHER_SHIFT 4
#define BD_AUTH_SHIFT 6
#define BD_SCENE_SHIFT 11
#define BD_A_ALG_SHIFT 11
#define BD_AKEY_LEN_SHIFT 5
#define BD_C_WIDTH_SHIFT 6
#define BD_CKEY_LEN_SHIFT 9
#define BD_C_MODE_SHIFT 12
#define BD_C_ALG_SHIFT 16
#define BD_CIPHER_SRC_OFFSET_SHIFT 16
#define BD_DK_LEN_SHIFT 16
#define BD_PAGE_PAD_TYPE_SHIFT 4
#define BD_CHK_GRD_CTRL_SHIFT 8
#define BD_CHK_REF_CTRL_SHIFT 12
#define BD_BLOCK_SIZE_SHIFT 16
#define BD_TAG_MASK GENMASK(15, 0)
#define BD_TYPE_MASK GENMASK(3, 0)
#define BD_ICV_MASK GENMASK(3, 1)
#define BD_ICV_SHIFT 1
#define BD_ICV_CHECK_FAIL 0x2
#define BD_ICV_ERROR 0x3
#define BD_CSC_MASK GENMASK(6, 4)
#define BD_CSC_SHIFT 4
#define BD_CSC_CHECK_FAIL 0x2
#define BD_FLAG_MASK GENMASK(10, 7)
#define BD_FLAG_SHIFT 7
#define BD_DC_MASK GENMASK(13, 11)
#define BD_DC_SHIFT 11
#define BD_DC_FAIL 0x2
#define BD_ERROR_TYPE_MASK GENMASK(23, 16)
#define BD_ERROR_TYPE_SHIFT 16
#define BD_WARNING_TYPE_MASK GENMASK(31, 24)
#define BD_WARNING_TYPE_SHIFT 24
#define SEC_NO_SCENE 0x0
#define SEC_IPSEC_SCENE 0x1
#define SEC_BASEBAND_SCENE 0x2
#define SEC_SSLTLS_SCENE 0x3
#define SEC_DTLS_SCENE 0x4
#define SEC_STORAGE_ACCESS_DISK_SCENE 0x5
#define SEC_STORAGE_NAS_SCENE 0x6
#define SEC_STREAM_DATA_SCENE 0x7
#define SEC_PBKDF2_SCENE 0x8
#define SEC_SMB_SCENE 0x9
#define C_ALG_DES 0x0
#define C_ALG_3DES 0x1
#define C_ALG_AES 0x2
#define C_ALG_SM4 0x3
#define C_MODE_ECB 0x0
#define C_MODE_CBC 0x1
#define C_MODE_CTR 0x4
#define C_MODE_CCM 0x5
#define C_MODE_GCM 0x6
#define C_MODE_XTS 0x7
#define C_MODE_CBC_CS 0x9
#define CKEY_LEN_128_BIT 0x0
#define CKEY_LEN_192_BIT 0x1
#define CKEY_LEN_256_BIT 0x2
#define C_ICV_LEN_16_BYTE 0x10
#define C_WIDTH_CS1 0x1
#define C_WIDTH_CS2 0x2
#define C_WIDTH_CS3 0x3
#define A_ALG_HMAC_SHA1 0x10
#define A_ALG_HMAC_SHA256 0x11
#define A_ALG_AES_CMAC 0x21
#define A_ALG_AES_GMAC 0x22
#define AKEY_LEN_128_BIT 0x4
#define MAC_LEN_96_BIT 0x3
#define MAC_LEN_128_BIT 0x4
#define MAC_LEN_256_BIT 0x8
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000 /* 1ms */
#define SEC_CHAIN_ABN_RD_ADDR_LOW 0x300
#define SEC_CHAIN_ABN_RD_ADDR_HIG 0x304
#define SEC_CHAIN_ABN_RD_LEN 0x308
#define SEC_CHAIN_ABN_WR_ADDR_LOW 0x310
#define SEC_CHAIN_ABN_WR_ADDR_HIG 0x314
#define SEC_CHAIN_ABN_WR_LEN 0x318
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_CHAIN_ABN_RD_ADDR_LOW 0x300
#define SEC_CHAIN_ABN_RD_ADDR_HIG 0x304
#define SEC_CHAIN_ABN_RD_LEN 0x308
#define SEC_CHAIN_ABN_WR_ADDR_LOW 0x310
#define SEC_CHAIN_ABN_WR_ADDR_HIG 0x314
#define SEC_CHAIN_ABN_WR_LEN 0x318
#define SEC_CHAIN_ABN_LEN 128UL
#define FORMAT_DECIMAL 10
#define FORMAT_DECIMAL 10
static const char hisi_sec_name[] = "hisi_sec";
static atomic_t hisi_sec_ref = {0};
static struct dentry *hsec_debugfs_root;
LIST_HEAD(hisi_sec_list);
DEFINE_MUTEX(hisi_sec_list_lock);
static struct workqueue_struct *sec_wq;
struct hisi_sec *find_sec_device(int node)
{
struct hisi_sec *ret = NULL;
......@@ -292,17 +138,15 @@ struct hisi_sec_hw_error {
};
static const struct hisi_sec_hw_error sec_hw_error[] = {
{.int_msk = BIT(0), .msg = "sec_ecc_1bitt_err"},
{.int_msk = BIT(1), .msg = "sec_ecc_2bit_err"},
{.int_msk = BIT(2), .msg = "sec_axi_rresp_err"},
{.int_msk = BIT(3), .msg = "sec_axi_bresp_err"},
{.int_msk = BIT(4), .msg = "sec_src_addr_parse_err"},
{.int_msk = BIT(5), .msg = "sec_dst_addr_parse_err"},
{.int_msk = BIT(6), .msg = "sec_pre_in_addr_err"},
{.int_msk = BIT(7), .msg = "sec_pre_in_data_err"},
{.int_msk = BIT(8), .msg = "sec_com_inf_err"},
{.int_msk = BIT(9), .msg = "sec_enc_inf_err"},
{.int_msk = BIT(10), .msg = "sec_pre_out_err"},
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
{.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"},
{.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"},
{.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"},
{.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"},
{.int_msk = BIT(7), .msg = "sec_bd_err_rint"},
{.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"},
{ /* sentinel */ }
};
......@@ -312,7 +156,7 @@ enum ctrl_debug_file_index {
HSEC_DEBUG_FILE_NUM,
};
static const char *const ctrl_debug_file_name[] = {
static const char * const ctrl_debug_file_name[] = {
[HSEC_CURRENT_QM] = "current_qm",
[HSEC_CLEAR_ENABLE] = "clear_enable",
};
......@@ -339,6 +183,7 @@ struct hisi_sec_ctrl {
static struct debugfs_reg32 hsec_dfx_regs[] = {
{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"HSEC_SAA_EN ", 0x301270},
{"HSEC_BD_LATENCY_MIN ", 0x301600},
{"HSEC_BD_LATENCY_MAX ", 0x301608},
{"HSEC_BD_LATENCY_AVG ", 0x30160C},
......@@ -436,6 +281,12 @@ module_param(enable_sm4_ctr, int, 0444);
static int ctx_q_num = 64;
module_param(ctx_q_num, int, 0444);
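/*
 * fusion_limit: maximum number of requests merged into one fused BD.
 * fusion_tmout_usec: how long a partially filled batch may wait before
 * the delayed work flushes it.
 */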
static int fusion_limit = 64;
module_param(fusion_limit, int, 0444);
static int fusion_tmout_usec = 500;
module_param(fusion_tmout_usec, int, 0444);
static const struct pci_device_id hisi_sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_SEC_PF) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_SEC_VF) },
......@@ -467,7 +318,7 @@ u8 sec_get_endian(struct hisi_sec *hisi_sec)
*/
if (hisi_sec->qm.pdev->is_virtfn) {
dev_err_ratelimited(&hisi_sec->qm.pdev->dev,
"error! shouldn't access a register of the engine in a VF\n");
"error! shouldn't access a register in VF\n");
return SEC_LE;
}
reg = readl_relaxed(hisi_sec->qm.io_base + SEC_ENGINE_PF_CFG_OFF +
......@@ -489,7 +340,12 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
u32 reg;
struct hisi_qm *qm = &hisi_sec->qm;
void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
SEC_ACC_COMMON_REG_OFF;
SEC_ACC_COMMON_REG_OFF;
/* disable clock gate control */
reg = readl_relaxed(base + SEC_CONTROL_REG);
reg &= ~BIT(3);
writel_relaxed(reg, base + SEC_CONTROL_REG);
writel_relaxed(0x1, base + SEC_MEM_START_INIT_REG);
ret = readl_relaxed_poll_timeout(base +
......@@ -515,8 +371,7 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
writel_relaxed(0xfffff7fd, base + SEC_BD_ERR_CHK_EN_REG(1));
writel_relaxed(0xffffbfff, base + SEC_BD_ERR_CHK_EN_REG(3));
/* enable abnormal int */
writel_relaxed(SEC_PF_INT_MSK, base + SEC_PF_ABNORMAL_INT_ENABLE_REG);
/* enable RAS int */
writel_relaxed(SEC_RAS_CE_ENB_MSK, base + SEC_RAS_CE_ENABLE_REG);
writel_relaxed(SEC_RAS_FE_ENB_MSK, base + SEC_RAS_FE_ENABLE_REG);
writel_relaxed(SEC_RAS_NFE_ENB_MSK, base + SEC_RAS_NFE_ENABLE_REG);
......@@ -526,7 +381,7 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
reg |= BIT(3);
writel_relaxed(reg, base + SEC_CONTROL_REG);
/*config endian */
/* config endian */
reg = readl_relaxed(base + SEC_CONTROL_REG);
reg |= sec_get_endian(hisi_sec);
writel_relaxed(reg, base + SEC_CONTROL_REG);
......@@ -535,6 +390,10 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
writel_relaxed(HSEC_SM4_CTR_ENABLE_MSK,
qm->io_base + HSEC_SM4_CTR_ENABLE_REG);
/* todo: add enable_sm4_xts_miv*/
writel_relaxed(HSEC_XTS_MIV_ENABLE_MSK,
qm->io_base + HSEC_XTS_MIV_ENABLE_REG);
return 0;
}
......@@ -578,7 +437,7 @@ static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state)
if (qm->ver == QM_HW_V1) {
writel(HSEC_CORE_INT_DISABLE, qm->io_base + HSEC_CORE_INT_MASK);
dev_info(&qm->pdev->dev, "SEC v%d does not support hw error handle\n",
dev_info(&qm->pdev->dev, "v%d don't support hw error handle\n",
qm->ver);
return;
}
......@@ -586,7 +445,8 @@ static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state)
if (state) {
/* enable SEC hw error interrupts */
writel(0, hisi_sec->qm.io_base + HSEC_CORE_INT_MASK);
writel(HSEC_CORE_INT_ENABLE, hisi_sec->qm.io_base +
HSEC_CORE_INT_MASK);
} else {
/* disable SEC hw error interrupts */
writel(HSEC_CORE_INT_DISABLE,
......@@ -635,8 +495,8 @@ static u32 clear_enable_read(struct ctrl_debug_file *file)
{
struct hisi_qm *qm = file_to_qm(file);
return readl(qm->io_base + HSEC_SOFT_CTRL_CNT_CLR_CE) &
SOFT_CTRL_CNT_CLR_CE_BIT;
return readl(qm->io_base + HSEC_CTRL_CNT_CLR_CE) &
HSEC_CTRL_CNT_CLR_CE_BIT;
}
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
......@@ -647,9 +507,9 @@ static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
if (val != 1 && val != 0)
return -EINVAL;
tmp = (readl(qm->io_base + HSEC_SOFT_CTRL_CNT_CLR_CE) &
~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HSEC_SOFT_CTRL_CNT_CLR_CE);
tmp = (readl(qm->io_base + HSEC_CTRL_CNT_CLR_CE) &
~HSEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + HSEC_CTRL_CNT_CLR_CE);
return 0;
}
......@@ -738,6 +598,7 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
struct hisi_sec *hisi_sec = ctrl->hisi_sec;
struct hisi_qm *qm = &hisi_sec->qm;
struct device *dev = &qm->pdev->dev;
struct hisi_sec_dfx *dfx = &hisi_sec->sec_dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
char buf[20];
......@@ -760,6 +621,43 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("get_task_cnt", 0444, tmp_d,
&dfx->get_task_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("put_task_cnt", 0444, tmp_d,
&dfx->put_task_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("gran_task_cnt", 0444, tmp_d,
&dfx->gran_task_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("thread_cnt", 0444, tmp_d, &dfx->thread_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("fake_busy_cnt", 0444,
tmp_d, &dfx->fake_busy_cnt);
if (!tmp)
return -ENOENT;
tmp = debugfs_create_u64("busy_comp_cnt", 0444, tmp_d,
&dfx->busy_comp_cnt);
if (!tmp)
return -ENOENT;
return 0;
}
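Each counter above is created with the same debugfs_create_u64() call followed by a NULL check. Purely as an illustration (not part of this patch), the same wiring could be done table-driven; sec_dfx_files[] and sec_create_dfx_files() are hypothetical names, the dfx field names are the ones used above, and the sketch keeps the dentry-pointer return-value check this patch relies on.

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/stddef.h>

/* Hypothetical, illustration only: table-driven creation of the dfx counters. */
struct sec_dfx_file {
	const char *name;
	size_t offset;
};

static const struct sec_dfx_file sec_dfx_files[] = {
	{ "send_cnt",      offsetof(struct hisi_sec_dfx, send_cnt) },
	{ "recv_cnt",      offsetof(struct hisi_sec_dfx, recv_cnt) },
	{ "get_task_cnt",  offsetof(struct hisi_sec_dfx, get_task_cnt) },
	{ "put_task_cnt",  offsetof(struct hisi_sec_dfx, put_task_cnt) },
	{ "gran_task_cnt", offsetof(struct hisi_sec_dfx, gran_task_cnt) },
	{ "thread_cnt",    offsetof(struct hisi_sec_dfx, thread_cnt) },
	{ "fake_busy_cnt", offsetof(struct hisi_sec_dfx, fake_busy_cnt) },
	{ "busy_comp_cnt", offsetof(struct hisi_sec_dfx, busy_comp_cnt) },
};

static int sec_create_dfx_files(struct dentry *dir, struct hisi_sec_dfx *dfx)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_files); i++) {
		u64 *val = (u64 *)((char *)dfx + sec_dfx_files[i].offset);

		if (!debugfs_create_u64(sec_dfx_files[i].name, 0444, dir, val))
			return -ENOENT;
	}

	return 0;
}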
......@@ -859,32 +757,14 @@ static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
return 0;
}
static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
struct hisi_sec *hisi_sec;
enum qm_hw_ver rev_id;
struct hisi_qm *qm;
int ret;
rev_id = hisi_qm_get_hw_version(pdev);
if (rev_id == QM_HW_UNKNOWN)
return -EINVAL;
hisi_sec = devm_kzalloc(&pdev->dev, sizeof(*hisi_sec), GFP_KERNEL);
if (!hisi_sec)
return -ENOMEM;
pci_set_drvdata(pdev, hisi_sec);
hisi_sec_add_to_list(hisi_sec);
hisi_sec->sgl_pool = acc_create_sgl_pool(&pdev->dev, "hsec-sgl");
if (!hisi_sec->sgl_pool)
return -ENOMEM;
hisi_sec->ctx_q_num = ctx_q_num;
return -ENODEV;
qm = &hisi_sec->qm;
qm->pdev = pdev;
qm->ver = rev_id;
......@@ -892,6 +772,7 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qm->dev_name = hisi_sec_name;
qm->fun_type = (pdev->device == 0xa255) ? QM_HW_PF : QM_HW_VF;
qm->algs = "sec\n";
qm->wq = sec_wq;
switch (uacce_mode) {
case UACCE_MODE_NOUACCE:
......@@ -912,23 +793,18 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qm->use_uacce = true;
break;
default:
ret = -EINVAL;
goto err_remove_from_list;
return -EINVAL;
}
ret = hisi_qm_init(qm);
if (ret) {
dev_err(&pdev->dev, "Failed to init qm!\n");
goto err_remove_from_list;
}
return hisi_qm_init(qm);
}
static int hisi_sec_probe_init(struct hisi_qm *qm, struct hisi_sec *hisi_sec)
{
if (qm->fun_type == QM_HW_PF) {
ret = hisi_sec_pf_probe_init(hisi_sec);
if (ret)
goto err_remove_from_list;
qm->qp_base = HSEC_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
return hisi_sec_pf_probe_init(hisi_sec);
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
......@@ -945,6 +821,47 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
}
return 0;
}
static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_sec *hisi_sec;
struct hisi_qm *qm;
int ret;
hisi_sec = devm_kzalloc(&pdev->dev, sizeof(*hisi_sec), GFP_KERNEL);
if (!hisi_sec)
return -ENOMEM;
pci_set_drvdata(pdev, hisi_sec);
hisi_sec_add_to_list(hisi_sec);
hisi_sec->sgl_pool = acc_create_sgl_pool(&pdev->dev, "hsec-sgl");
if (!hisi_sec->sgl_pool)
return -ENOMEM;
atomic_inc(&hisi_sec_ref);
hisi_sec->ctx_q_num = ctx_q_num;
hisi_sec->fusion_limit = fusion_limit;
hisi_sec->fusion_tmout_usec = fusion_tmout_usec;
qm = &hisi_sec->qm;
ret = hisi_sec_qm_init(qm, pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to pre init qm!\n");
goto err_remove_from_list;
}
ret = hisi_sec_probe_init(qm, hisi_sec);
if (ret) {
dev_err(&pdev->dev, "Failed to probe!\n");
goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
if (ret)
goto err_qm_uninit;
......@@ -959,11 +876,12 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hisi_qm_uninit(qm);
err_remove_from_list:
hisi_sec_remove_from_list(hisi_sec);
atomic_dec(&hisi_sec_ref);
return ret;
}
/* now we only support equal assignment */
static int hisi_sec_vf_q_assign(struct hisi_sec *hisi_sec, int num_vfs)
static int hisi_sec_vf_q_assign(struct hisi_sec *hisi_sec, u32 num_vfs)
{
struct hisi_sec_ctrl *ctrl = hisi_sec->ctrl;
struct hisi_qm *qm = &hisi_sec->qm;
......@@ -1012,7 +930,8 @@ static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
#ifdef CONFIG_PCI_IOV
struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, ret;
u32 num_vfs;
int pre_existing_vfs, ret;
pre_existing_vfs = pci_num_vf(pdev);
......@@ -1022,7 +941,7 @@ static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
return 0;
}
num_vfs = min_t(int, max_vfs, HSEC_VF_NUM);
num_vfs = min_t(u32, max_vfs, HSEC_VF_NUM);
ret = hisi_sec_vf_q_assign(hisi_sec, num_vfs);
if (ret) {
......@@ -1087,6 +1006,7 @@ static void hisi_sec_remove(struct pci_dev *pdev)
hisi_qm_uninit(qm);
hisi_sec_remove_from_list(hisi_sec);
atomic_dec(&hisi_sec_ref);
}
static void hisi_sec_log_hw_error(struct hisi_sec *hisi_sec, u32 err_sts)
......@@ -1142,7 +1062,7 @@ static pci_ers_result_t hisi_sec_process_hw_error(struct pci_dev *pdev)
if (!hisi_sec) {
dev_err(dev,
"Can't recover SEC-error occurred during device init\n");
"Can't recover error occurred during device init\n");
return PCI_ERS_RESULT_NONE;
}
......@@ -1306,6 +1226,7 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec)
/* The reset related sub-control registers are not in PCI BAR */
if (ACPI_HANDLE(dev)) {
acpi_status s;
s = acpi_evaluate_object(ACPI_HANDLE(dev), "SRST", NULL, NULL);
if (ACPI_FAILURE(s)) {
dev_err(dev, "Controller reset fails\n");
......@@ -1564,6 +1485,14 @@ static int __init hisi_sec_init(void)
{
int ret;
sec_wq = alloc_workqueue("hisi_sec", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
if (!sec_wq) {
pr_err("Fallied to alloc workqueue\n");
return PTR_ERR(sec_wq);
}
hisi_sec_register_debugfs();
ret = pci_register_driver(&hisi_sec_pci_driver);
......@@ -1575,6 +1504,12 @@ static int __init hisi_sec_init(void)
if (uacce_mode == UACCE_MODE_UACCE)
return 0;
#endif
if (atomic_read(&hisi_sec_ref) <= 0) {
ret = -ENODEV;
goto err_pci;
}
pr_info("hisi_sec: register to crypto\n");
ret = hisi_sec_register_to_crypto();
if (ret < 0) {
......@@ -1588,7 +1523,8 @@ static int __init hisi_sec_init(void)
pci_unregister_driver(&hisi_sec_pci_driver);
err_pci:
hisi_sec_unregister_debugfs();
if (sec_wq)
destroy_workqueue(sec_wq);
return ret;
}
......@@ -1602,6 +1538,8 @@ static void __exit hisi_sec_exit(void)
#endif
pci_unregister_driver(&hisi_sec_pci_driver);
hisi_sec_unregister_debugfs();
if (sec_wq)
destroy_workqueue(sec_wq);
}
module_init(hisi_sec_init);
......
......@@ -32,8 +32,10 @@ struct hisi_sec_sqe_type1 {
__u32 c_mode:4;
__u32 c_alg:4;
__u32 rsvd4:12;
__u32 dw4;
__u32 dw5;
__u32 auth_gran_size:24;
__u32:8;
__u32 cipher_gran_size:24;
__u32:8;
__u32 auth_src_offset:16;
__u32 cipher_src_offset:16;
__u32 gran_num:16;
......@@ -177,25 +179,40 @@ struct hisi_sec_sqe {
__u32 mac_addr_type:3;
__u32 rsvd0:8;
union {
struct hisi_sec_sqe_type1 type1; /* storage sence */
struct hisi_sec_sqe_type2 type2; /* the other sence */
struct hisi_sec_sqe_type1 type1;
struct hisi_sec_sqe_type2 type2;
};
};
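The type1 layout (used for the storage scene, as the removed comment above notes) carries the per-granularity sizes and granule count added in this hunk. As a purely illustrative sketch, not part of this patch, filling those fields for nr_sec granules of sec_size bytes might look like the following; sec_fill_type1_gran() is a hypothetical helper and only the field names come from the structures shown here.

/*
 * Hypothetical, illustration only: describe nr_sec granules of sec_size
 * bytes in a type1 (storage scene) BD.
 */
static void sec_fill_type1_gran(struct hisi_sec_sqe *sqe,
				u32 sec_size, u32 nr_sec)
{
	sqe->type1.cipher_gran_size = sec_size;	/* bytes ciphered per granule */
	sqe->type1.auth_gran_size = sec_size;	/* bytes authenticated per granule */
	sqe->type1.gran_num = nr_sec;		/* granules carried by this BD */
}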
enum C_ALG {
C_ALG_DES = 0x0,
C_ALG_3DES = 0x1,
C_ALG_AES = 0x2,
C_ALG_SM4 = 0x3,
};
enum C_MODE {
ECB = 0x0,
CBC = 0x1,
CTR = 0x4,
CCM = 0x5,
GCM = 0x6,
XTS = 0x7,
C_MODE_ECB = 0x0,
C_MODE_CBC = 0x1,
C_MODE_CTR = 0x4,
C_MODE_CCM = 0x5,
C_MODE_GCM = 0x6,
C_MODE_XTS = 0x7,
C_MODE_CBC_CS = 0x9,
};
enum C_ALG {
DES = 0x0,
TRIPLEDES = 0x1,
AES = 0x2,
SM4 = 0x3,
enum CKEY_LEN {
CKEY_LEN_128_BIT = 0x0,
CKEY_LEN_192_BIT = 0x1,
CKEY_LEN_256_BIT = 0x2,
CKEY_LEN_DES = 0x1,
CKEY_LEN_3DES_3KEY = 0x1,
CKEY_LEN_3DES_2KEY = 0x3,
};
enum {
BD_TYPE1 = 0x1,
BD_TYPE2 = 0x2,
};
#endif
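For orientation, the CKEY_LEN_*_BIT values above line up with the standard AES key sizes used on the skcipher setkey path. A minimal, hypothetical mapping helper (not part of this patch; only the CKEY_LEN_* names come from this header) could look like:

#include <crypto/aes.h>
#include <linux/errno.h>

/* Hypothetical, illustration only: map an AES key length to a CKEY_LEN value. */
static int sec_aes_key_len_to_ckey(unsigned int key_len)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
		return CKEY_LEN_128_BIT;
	case AES_KEYSIZE_192:
		return CKEY_LEN_192_BIT;
	case AES_KEYSIZE_256:
		return CKEY_LEN_256_BIT;
	default:
		return -EINVAL;
	}
}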