Commit 76037d0b authored by zhangwei, committed by Xie XiuQi

ACC: add sec async interface

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Add sec async interface

	modified:   drivers/crypto/hisilicon/sec2/sec.h
	modified:   drivers/crypto/hisilicon/sec2/sec_crypto.c
	modified:   drivers/crypto/hisilicon/sec2/sec_crypto.h
	modified:   drivers/crypto/hisilicon/sec2/sec_main.c
	modified:   drivers/crypto/hisilicon/sec2/sec_usr_if.h
Signed-off-by: Zhangwei <zhangwei375@huawei.com>
Reviewed-by: hucheng.hu <hucheng.hu@huawei.com>
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 5857d956
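
For context on how the new async path is exercised: the skciphers registered below (e.g. "cbc(aes)" at priority 4001, flagged CRYPTO_ALG_ASYNC) are reached through the standard kernel skcipher API, where an async driver returns -EINPROGRESS from encrypt()/decrypt() and later fires the request's completion callback. A minimal caller sketch using the stock crypto_wait_req() helpers; this demo function is hypothetical and not part of the patch:

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical test caller: one CBC-AES encryption over a linear buffer.
 * len must be a multiple of AES_BLOCK_SIZE for CBC. */
static int sec_async_demo(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_req;

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* An async driver returns -EINPROGRESS here; crypto_wait_req()
	 * sleeps until the completion callback runs. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_free_req:
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
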
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include "../qm.h"
#include "../sgl.h"
#include "sec_usr_if.h"
#undef pr_fmt
@@ -26,6 +27,7 @@ struct hisi_sec {
struct hisi_qm qm;
struct list_head list;
struct hisi_sec_ctrl *ctrl;
struct dma_pool *sgl_pool;
};
struct hisi_sec *find_sec_device(int node);
......
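
The sgl_pool added above is a per-device DMA pool from which hardware SGL tables are carved; it is created in hisi_sec_probe() with acc_create_sgl_pool() and consumed by the acc_sg_buf_map/unmap helpers in sec_crypto.c. Those acc_* helpers are internal to this driver stack, but a rough sketch of the pattern they presumably wrap, using the stock dma_pool API (size and alignment below are placeholders, not the driver's values):

#include <linux/device.h>
#include <linux/dmapool.h>

/* Placeholder geometry for one hardware SGL table; illustrative only. */
#define DEMO_HW_SGL_SIZE	4096
#define DEMO_HW_SGL_ALIGN	64

static int sgl_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *sgl;

	pool = dma_pool_create("hsec-sgl", dev, DEMO_HW_SGL_SIZE,
			       DEMO_HW_SGL_ALIGN, 0);
	if (!pool)
		return -ENOMEM;

	/* Per request: take one table, hand "dma" to the hardware BD. */
	sgl = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!sgl) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, sgl, dma);
	dma_pool_destroy(pool);
	return 0;
}
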
@@ -12,9 +12,10 @@
#include "sec.h"
#include "sec_crypto.h"
#define HSEC_SGL_CACHE_SIZE (SEC_MAX_SGL_NUM * sizeof(struct sgl))
#define SEC_ASYNC
#define SEC_INVLD_REQ_ID -1
// #define SEC_DEBUG_LOG
#define SEC_DEBUG_LOG
#ifdef SEC_DEBUG_LOG
#define dbg(msg, ...) pr_info(msg, ##__VA_ARGS__)
@@ -22,23 +23,61 @@
#define dbg(msg, ...)
#endif
struct hisi_sec_buffer {
struct sgl *c_in;
struct hisi_sec_cipher_req {
struct acc_hw_sgl *c_in;
dma_addr_t c_in_dma;
struct sgl *c_out;
struct acc_hw_sgl *c_out;
dma_addr_t c_out_dma;
u8 *c_key;
dma_addr_t c_key_dma;
u8 *c_ivin;
dma_addr_t c_ivin_dma;
u32 c_len;
bool encrypt;
};
struct hisi_sec_qp_ctx {
struct hisi_sec_buffer buffer;
struct hisi_qp *qp;
struct hisi_sec_ctx;
struct hisi_sec_req {
struct hisi_sec_sqe sec_sqe;
struct hisi_sec_ctx *ctx;
struct skcipher_request *sk_req;
struct hisi_sec_cipher_req c_req;
int err;
int req_id;
};
struct hisi_sec_cipher_ctx {
u8 *c_key;
dma_addr_t c_key_dma;
u8 c_mode;
u8 c_alg;
u8 c_key_len;
};
struct hisi_sec_ctx {
struct hisi_qp *qp;
struct hisi_sec *sec;
struct device *sec_dev;
struct hisi_sec_req **req_list;
unsigned long *req_bitmap;
spinlock_t req_lock;
struct hisi_sec_cipher_ctx c_ctx;
};
static void dump_data(unsigned char *buf, unsigned int len)
{
unsigned int i;
for (i = 0; i < len; i += 8)
dbg("0x%llx: \t%02x %02x %02x %02x %02x %02x %02x %02x\n",
(unsigned long long)(buf + i),
*(buf + i), (*(buf + i + 1)),
*(buf + i + 2), *(buf + i + 3),
*(buf + i + 4), *(buf + i + 5),
*(buf + i + 6), *(buf + i + 7));
dbg("\n");
}
static void dump_sec_bd(unsigned int *bd)
{
unsigned int i;
@@ -49,101 +88,66 @@ static void dump_sec_bd(unsigned int *bd)
dbg("\n");
}
/* let's allocate one buffer now, may have problem in async case */
static int hisi_sec_alloc_qp_buffer(struct hisi_sec_qp_ctx *hisi_sec_qp_ctx)
static void sec_update_iv(struct hisi_sec_req *req, u8 *iv)
{
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_qp *qp = hisi_sec_qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
int ret;
int i;
buf->c_in = dma_alloc_coherent(dev, HSEC_SGL_CACHE_SIZE,
&buf->c_in_dma, GFP_KERNEL);
if (!buf->c_in)
return -ENOMEM;
buf->c_out = dma_alloc_coherent(dev, HSEC_SGL_CACHE_SIZE,
&buf->c_out_dma, GFP_KERNEL);
if (!buf->c_out) {
ret = -ENOMEM;
goto err_alloc_output;
}
// todo: update iv by cbc/ctr mode
}
for (i = 0; i < SEC_MAX_SGL_NUM - 1; i++) {
buf->c_in[i].next = (struct sgl *)(buf->c_in_dma +
(i +
1) * sizeof(struct sgl));
buf->c_out[i].next =
(struct sgl *)(buf->c_out_dma +
(i + 1) * sizeof(struct sgl));
}
static void sec_cipher_cb(struct hisi_qp *qp, void *);
static void sec_sg_unmap(struct device *dev,
struct skcipher_request *sk_req,
struct hisi_sec_cipher_req *creq,
struct dma_pool *pool)
{
if (sk_req->dst != sk_req->src)
acc_sg_buf_unmap(dev, sk_req->dst,
creq->c_out, creq->c_out_dma, pool);
buf->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
&buf->c_key_dma, GFP_KERNEL);
if (!buf->c_key) {
ret = -ENOMEM;
goto err_alloc_key;
}
acc_sg_buf_unmap(dev, sk_req->src, creq->c_in, creq->c_in_dma, pool);
}
buf->c_ivin = dma_alloc_coherent(dev, SEC_MAX_IV_SIZE,
&buf->c_ivin_dma, GFP_KERNEL);
if (!buf->c_ivin) {
ret = -ENOMEM;
goto err_alloc_ivin;
static int hisi_sec_alloc_req_id(struct hisi_sec_req *req)
{
struct hisi_sec_ctx *ctx = req->ctx;
int req_id;
unsigned long flags;
spin_lock_irqsave(&ctx->req_lock, flags);
req_id = find_first_zero_bit(ctx->req_bitmap, QM_Q_DEPTH);
if (req_id >= QM_Q_DEPTH) {
spin_unlock_irqrestore(&ctx->req_lock, flags);
dev_err(ctx->sec_dev, "no free req id\n");
return -EBUSY;
}
set_bit(req_id, ctx->req_bitmap);
spin_unlock_irqrestore(&ctx->req_lock, flags);
sec_sqe->type2.data_src_addr_l = lower_32_bits(buf->c_in_dma);
sec_sqe->type2.data_src_addr_h = upper_32_bits(buf->c_in_dma);
sec_sqe->type2.data_dst_addr_l = lower_32_bits(buf->c_out_dma);
sec_sqe->type2.data_dst_addr_h = upper_32_bits(buf->c_out_dma);
sec_sqe->type2.c_key_addr_l = lower_32_bits(buf->c_key_dma);
sec_sqe->type2.c_key_addr_h = upper_32_bits(buf->c_key_dma);
sec_sqe->type2.c_ivin_addr_l = lower_32_bits(buf->c_ivin_dma);
sec_sqe->type2.c_ivin_addr_h = upper_32_bits(buf->c_ivin_dma);
ctx->req_list[req_id] = req;
req->req_id = req_id;
return 0;
err_alloc_ivin:
dma_free_coherent(dev, SEC_MAX_KEY_SIZE, buf->c_key, buf->c_key_dma);
err_alloc_key:
dma_free_coherent(dev, HSEC_SGL_CACHE_SIZE, buf->c_out, buf->c_out_dma);
err_alloc_output:
dma_free_coherent(dev, HSEC_SGL_CACHE_SIZE, buf->c_in, buf->c_in_dma);
return ret;
}
static void hisi_sec_free_qp_buffer(struct hisi_sec_qp_ctx *hisi_sec_qp_ctx)
static void hisi_sec_free_req_id(struct hisi_sec_req *req)
{
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_qp *qp = hisi_sec_qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_sec_ctx *ctx = req->ctx;
int req_id = req->req_id;
unsigned long flags;
if (buf->c_in) {
dma_free_coherent(dev, HSEC_SGL_CACHE_SIZE, buf->c_in,
buf->c_in_dma);
buf->c_in = NULL;
}
if (buf->c_out) {
dma_free_coherent(dev, HSEC_SGL_CACHE_SIZE, buf->c_out,
buf->c_out_dma);
buf->c_out = NULL;
}
if (buf->c_key) {
dma_free_coherent(dev, SEC_MAX_KEY_SIZE, buf->c_key,
buf->c_key_dma);
buf->c_key = NULL;
}
if (buf->c_ivin) {
dma_free_coherent(dev, SEC_MAX_IV_SIZE, buf->c_ivin,
buf->c_ivin_dma);
buf->c_ivin = NULL;
if (req_id < 0) {
dev_err(ctx->sec_dev, "invalid req id %d\n", req_id);
return;
}
req->req_id = SEC_INVLD_REQ_ID;
ctx->req_list[req_id] = NULL;
spin_lock_irqsave(&ctx->req_lock, flags);
bitmap_clear(ctx->req_bitmap, req_id, 1);
spin_unlock_irqrestore(&ctx->req_lock, flags);
}
static int hisi_sec_create_qp(struct hisi_qm *qm, struct hisi_sec_qp_ctx *ctx,
static int hisi_sec_create_qp(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
int alg_type, int req_type)
{
struct hisi_qp *qp;
@@ -155,81 +159,182 @@ static int hisi_sec_create_qp(struct hisi_qm *qm, struct hisi_sec_qp_ctx *ctx,
qp->req_type = req_type;
qp->qp_ctx = ctx;
#ifdef SEC_ASYNC
qp->req_cb = sec_cipher_cb;
#endif
ctx->qp = qp;
ret = hisi_sec_alloc_qp_buffer(ctx);
if (ret)
goto err_release_qp;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0)
goto err_free_qp_buffer;
goto err_qm_release_qp;
return 0;
err_free_qp_buffer:
hisi_sec_free_qp_buffer(ctx);
err_release_qp:
err_qm_release_qp:
hisi_qm_release_qp(qp);
return ret;
}
static void hisi_sec_release_qp(struct hisi_sec_qp_ctx *ctx)
static void hisi_sec_release_qp(struct hisi_sec_ctx *ctx)
{
hisi_qm_stop_qp(ctx->qp);
hisi_sec_free_qp_buffer(ctx);
hisi_qm_release_qp(ctx->qp);
}
static int hisi_sec_alloc_cipher_ctx(struct crypto_skcipher *tfm)
static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
// const char *alg_name = crypto_tfm_alg_name(tfm);
struct hisi_sec *hisi_sec;
if (!ctx || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->req_bitmap = kcalloc(BITS_TO_LONGS(qlen), sizeof(long),
GFP_KERNEL);
if (!ctx->req_bitmap)
return -ENOMEM;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list) {
kfree(ctx->req_bitmap);
return -ENOMEM;
}
return 0;
}
static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct hisi_qm *qm;
struct hisi_sec_cipher_ctx *c_ctx;
struct hisi_sec *sec;
int ret;
/* find the proper sec device */
hisi_sec = find_sec_device(cpu_to_node(smp_processor_id()));
if (!hisi_sec) {
pr_err("Failed to find a proper SEC device!\n");
crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
sec = find_sec_device(cpu_to_node(smp_processor_id()));
if (!sec) {
pr_err("failed to find a proper sec device!\n");
return -ENODEV;
}
qm = &hisi_sec->qm;
ctx->sec = sec;
ret = hisi_sec_create_qp(qm, hisi_sec_qp_ctx, 0, 0);
qm = &sec->qm;
ctx->sec_dev = &qm->pdev->dev;
ret = hisi_sec_create_qp(qm, ctx, 0, 0);
if (ret)
goto err;
return ret;
return 0;
err:
hisi_sec_release_qp(hisi_sec_qp_ctx);
c_ctx = &ctx->c_ctx;
c_ctx->c_key = dma_alloc_coherent(ctx->sec_dev,
SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL);
if (!ctx->c_ctx.c_key) {
ret = -ENOMEM;
goto err_sec_release_qp;
}
return __hisi_sec_ctx_init(ctx, QM_Q_DEPTH);
err_sec_release_qp:
hisi_sec_release_qp(ctx);
return ret;
}
static void hisi_sec_free_cipher_ctx(struct crypto_skcipher *tfm)
static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_cipher_ctx *c_ctx;
c_ctx = &ctx->c_ctx;
if (c_ctx->c_key) {
dma_free_coherent(ctx->sec_dev, SEC_MAX_KEY_SIZE, c_ctx->c_key,
c_ctx->c_key_dma);
c_ctx->c_key = NULL;
}
hisi_sec_release_qp(hisi_sec_qp_ctx);
kfree(ctx->req_bitmap);
ctx->req_bitmap = NULL;
kfree(ctx->req_list);
ctx->req_list = NULL;
hisi_sec_release_qp(ctx);
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
static int sec_alloc_cipher_req(struct hisi_sec_req *req)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
struct device *sec_dev = req->ctx->sec_dev;
c_req->c_ivin = dma_alloc_coherent(sec_dev, SEC_IV_SIZE,
&c_req->c_ivin_dma, GFP_KERNEL);
if (!c_req->c_ivin)
return -ENOMEM;
sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
return 0;
}
static int sec_free_cipher_req(struct hisi_sec_req *req)
{
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct device *sec_dev = req->ctx->sec_dev;
if (c_req->c_ivin) {
dma_free_coherent(sec_dev, SEC_IV_SIZE,
c_req->c_ivin, c_req->c_ivin_dma);
c_req->c_ivin = NULL;
}
return 0;
}
static void sec_cipher_cb(struct hisi_qp *qp, void *resp)
{
struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
u32 req_id = sec_sqe->type2.tag;
struct hisi_sec_ctx *ctx = qp->qp_ctx;
struct dma_pool *pool = ctx->sec->sgl_pool;
struct hisi_sec_req *req;
int ret = 0;
req = ctx->req_list[req_id];
if (sec_sqe->type2.done != 0x1 || sec_sqe->type2.flag != 0x2) {
ret = sec_sqe->type2.error_type;
dump_sec_bd((uint32_t *)sec_sqe);
dump_data((unsigned char *)sec_sqe,
sizeof(struct hisi_sec_sqe));
}
sec_update_iv(req, req->sk_req->iv);
sec_sg_unmap(&qp->qm->pdev->dev, req->sk_req, &req->c_req, pool);
sec_free_cipher_req(req);
hisi_sec_free_req_id(req);
req->sk_req->base.complete(&req->sk_req->base, ret);
}
static int sec_skcipher_setkey(struct hisi_sec_ctx *sec_ctx,
const u8 *key, u32 keylen)
{
struct hisi_sec_cipher_ctx *c_ctx = &sec_ctx->c_ctx;
switch (keylen) {
case AES_KEYSIZE_128:
sec_sqe->type2.c_key_len = 0;
c_ctx->c_key_len = 0;
break;
case AES_KEYSIZE_192:
sec_sqe->type2.c_key_len = 1;
c_ctx->c_key_len = 1;
break;
case AES_KEYSIZE_256:
sec_sqe->type2.c_key_len = 2;
c_ctx->c_key_len = 2;
break;
default:
return -EINVAL;
@@ -239,138 +344,96 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm,
}
static int sec_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
const u8 *key, u32 keylen)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
memcpy(buf->c_key, key, keylen);
sec_sqe->type2.c_mode = ECB;
sec_sqe->type2.c_alg = AES;
memcpy(ctx->c_ctx.c_key, key, keylen);
ctx->c_ctx.c_mode = ECB;
ctx->c_ctx.c_alg = AES;
return sec_skcipher_setkey(tfm, key, keylen);
return sec_skcipher_setkey(ctx, key, keylen);
}
static int sec_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
const u8 *key, u32 keylen)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
memcpy(buf->c_key, key, keylen);
sec_sqe->type2.c_mode = CBC;
sec_sqe->type2.c_alg = AES;
memcpy(ctx->c_ctx.c_key, key, keylen);
ctx->c_ctx.c_mode = CBC;
ctx->c_ctx.c_alg = AES;
return sec_skcipher_setkey(tfm, key, keylen);
return sec_skcipher_setkey(ctx, key, keylen);
}
static int sec_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
const u8 *key, u32 keylen)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
memcpy(buf->c_key, key, keylen);
memcpy(ctx->c_ctx.c_key, key, keylen);
sec_sqe->type2.c_mode = CTR;
sec_sqe->type2.c_alg = AES;
ctx->c_ctx.c_mode = CTR;
ctx->c_ctx.c_alg = AES;
return sec_skcipher_setkey(tfm, key, keylen);
return sec_skcipher_setkey(ctx, key, keylen);
}
static int sec_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
const u8 *key, u32 keylen)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret = 0;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
memcpy(buf->c_key, key, keylen);
memcpy(ctx->c_ctx.c_key, key, keylen);
sec_sqe->type2.c_mode = XTS;
sec_sqe->type2.c_alg = AES;
ctx->c_ctx.c_mode = XTS;
ctx->c_ctx.c_alg = AES;
return sec_skcipher_setkey(tfm, key, keylen / 2);
return sec_skcipher_setkey(ctx, key, keylen / 2);
}
static int sec_skcipher_setkey_sm4_xts(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
const u8 *key, u32 keylen)
{
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(tfm);
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret = 0;
ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
memcpy(buf->c_key, key, keylen);
memcpy(ctx->c_ctx.c_key, key, keylen);
sec_sqe->type2.c_mode = XTS;
sec_sqe->type2.c_alg = SM4;
ctx->c_ctx.c_mode = XTS;
ctx->c_ctx.c_alg = SM4;
return sec_skcipher_setkey(tfm, key, keylen / 2);
return sec_skcipher_setkey(ctx, key, keylen / 2);
}
static int sec_sg_to_hw_sgl(struct device *sec_dev, struct scatterlist *sg_list,
struct sgl *sgl)
static int sec_cipher_fill_sqe(struct hisi_sec_sqe *sec_sqe,
struct hisi_sec_ctx *ctx, struct hisi_sec_cipher_req *c_req)
{
int ret = 0;
int i = 0;
int sgl_pos = 0;
int sge_pos = 0;
int sg_num = sg_nents(sg_list);
struct scatterlist *sg;
// todo: return sg_num is too large error
if (sg_num > SEC_MAX_SGL_NUM * SEC_MAX_SGE_NUM)
return -1;
// todo: return dma_mag_sg failed error
if (dma_map_sg(sec_dev, sg_list, sg_num, DMA_BIDIRECTIONAL) == 0)
return -1;
sgl->entrySumInChain = sg_num;
for_each_sg(sg_list, sg, sg_num, i) {
sgl_pos = i / SEC_MAX_SGL_NUM;
sge_pos = i % SEC_MAX_SGL_NUM;
dbg("sgl_pos[%d] sge_pos[%d]\n", sgl_pos, sge_pos);
sgl[sgl_pos].entrySumInSgl = sge_pos + 1;
sgl[sgl_pos].entryNumInSgl = sge_pos + 1;
sgl[sgl_pos].entry[sge_pos].buf = (u8 *) sg_dma_address(sg);
sgl[sgl_pos].entry[sge_pos].len = sg_dma_len(sg);
}
struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
return ret;
}
static int sec_skcipher_crypto(struct skcipher_request *skreq, bool encrypt)
{
int ret = 0;
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
struct hisi_sec_qp_ctx *hisi_sec_qp_ctx = crypto_skcipher_ctx(atfm);
struct hisi_sec_buffer *buf = &hisi_sec_qp_ctx->buffer;
struct hisi_sec_sqe *sec_sqe = &hisi_sec_qp_ctx->sec_sqe;
struct hisi_qp *qp = hisi_sec_qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
dbg("[%s] encrypt : %d\n", __func__, encrypt);
if (!c_req->c_len)
return -EINVAL;
if (sec_sg_to_hw_sgl(dev, skreq->src, buf->c_in))
return -EFAULT;
sec_sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
sec_sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
sec_sqe->type2.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
sec_sqe->type2.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
sec_sqe->type2.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
sec_sqe->type2.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
if (sec_sg_to_hw_sgl(dev, skreq->dst, buf->c_out))
return -EFAULT;
sec_sqe->type2.c_mode = c_ctx->c_mode;
sec_sqe->type2.c_alg = c_ctx->c_alg;
sec_sqe->type2.c_key_len = c_ctx->c_key_len;
sec_sqe->src_addr_type = 1;
sec_sqe->dst_addr_type = 1;
@@ -378,30 +441,115 @@ static int sec_skcipher_crypto(struct skcipher_request *skreq, bool encrypt)
sec_sqe->scene = 1;
sec_sqe->de = 1;
if (encrypt == 1)
if (c_req->encrypt == 1)
sec_sqe->cipher = 1;
else
sec_sqe->cipher = 2;
sec_sqe->type2.c_len = skreq->cryptlen;
sec_sqe->type2.c_len = c_req->c_len;
return 0;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req,
bool encrypt)
{
int ret = 0;
struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
struct hisi_qp *qp = ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
struct hisi_sec_cipher_req *c_req = &req->c_req;
struct dma_pool *pool = ctx->sec->sgl_pool;
if (crypto_skcipher_ivsize(atfm))
memcpy(buf->c_ivin, skreq->iv, crypto_skcipher_ivsize(atfm));
if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
return -EINVAL;
dbg("Dump c_ivin:");
req->sk_req = sk_req;
req->ctx = ctx;
ret = hisi_qp_send(qp, sec_sqe);
if (ret < 0)
memset(sec_sqe, 0, sizeof(struct hisi_sec_sqe));
ret = sec_alloc_cipher_req(req);
if (ret) {
dev_err(dev, "sec alloc cipher request failed\n");
return ret;
}
c_req->c_in = acc_sg_buf_map_to_hw_sgl(dev, sk_req->src, pool,
&c_req->c_in_dma);
if (IS_ERR(c_req->c_in)) {
ret = PTR_ERR(c_req->c_in);
goto err_free_cipher_req;
}
if (sk_req->dst == sk_req->src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
} else {
c_req->c_out = acc_sg_buf_map_to_hw_sgl(dev, sk_req->dst, pool,
&c_req->c_out_dma);
if (IS_ERR(c_req->c_out)) {
ret = PTR_ERR(c_req->c_out);
goto err_unmap_src_sg;
}
}
c_req->c_len = sk_req->cryptlen;
c_req->encrypt = encrypt;
ret = sec_cipher_fill_sqe(sec_sqe, ctx, c_req);
if (ret) {
dev_err(dev, "sec cipher fill sqe failed\n");
goto err_unmap_dst_sg;
}
if (!crypto_skcipher_ivsize(atfm)) {
ret = -EINVAL;
goto err_unmap_dst_sg;
} else
memcpy(c_req->c_ivin, sk_req->iv, crypto_skcipher_ivsize(atfm));
#ifdef SEC_ASYNC
ret = hisi_sec_alloc_req_id(req);
if (ret) {
dev_err(dev, "sec alloc req id failed\n");
goto err_unmap_dst_sg;
}
sec_sqe->type2.tag = req->req_id;
#endif
ret = hisi_qp_send(qp, sec_sqe);
if (ret < 0) {
dev_err(dev, "hisi_qp_send failed\n");
goto err_unmap_dst_sg;
}
#ifdef SEC_ASYNC
ret = -EINPROGRESS;
#else
ret = hisi_qp_wait(qp);
if (ret < 0)
return ret;
goto err_unmap_dst_sg;
if (sec_sqe->type2.c_mode == 0x4)
crypto_inc(skreq->iv, 16);
sec_update_iv(req, sk_req->iv);
sec_sg_unmap(dev, sk_req, c_req, pool);
sec_free_cipher_req(req);
#endif
dump_sec_bd((uint32_t *) sec_sqe);
return ret;
err_unmap_dst_sg:
if (sk_req->dst != sk_req->src)
acc_sg_buf_unmap(dev, sk_req->dst,
c_req->c_out, c_req->c_out_dma, pool);
err_unmap_src_sg:
acc_sg_buf_unmap(dev, sk_req->src,
c_req->c_in, c_req->c_in_dma, pool);
err_free_cipher_req:
sec_free_cipher_req(req);
return ret;
}
@@ -424,12 +572,12 @@ static struct skcipher_alg sec_algs[] = {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_qp_ctx),
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_alloc_cipher_ctx,
.exit = hisi_sec_free_cipher_ctx,
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_ecb,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
@@ -443,12 +591,12 @@ static struct skcipher_alg sec_algs[] = {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_qp_ctx),
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_alloc_cipher_ctx,
.exit = hisi_sec_free_cipher_ctx,
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_cbc,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
@@ -462,12 +610,12 @@ static struct skcipher_alg sec_algs[] = {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_qp_ctx),
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_alloc_cipher_ctx,
.exit = hisi_sec_free_cipher_ctx,
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_ctr,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
@@ -481,12 +629,12 @@ static struct skcipher_alg sec_algs[] = {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_qp_ctx),
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_alloc_cipher_ctx,
.exit = hisi_sec_free_cipher_ctx,
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_aes_xts,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
@@ -500,12 +648,12 @@ static struct skcipher_alg sec_algs[] = {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct hisi_sec_qp_ctx),
.cra_ctxsize = sizeof(struct hisi_sec_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
.init = hisi_sec_alloc_cipher_ctx,
.exit = hisi_sec_free_cipher_ctx,
.init = hisi_sec_cipher_ctx_init,
.exit = hisi_sec_cipher_ctx_exit,
.setkey = sec_skcipher_setkey_sm4_xts,
.decrypt = sec_skcipher_decrypt,
.encrypt = sec_skcipher_encrypt,
@@ -518,13 +666,7 @@ static struct skcipher_alg sec_algs[] = {
int hisi_sec_register_to_crypto(void)
{
int ret = 0;
ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
if (ret)
return ret;
return ret;
return crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
}
void hisi_sec_unregister_from_crypto(void)
......
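
The heart of the conversion above is the request-id round trip: sec_skcipher_crypto() reserves a free slot in the per-context bitmap, parks the request pointer in req_list[], and writes the slot index into the SQE's type2.tag; on completion, sec_cipher_cb() reads the tag back out of the response BD to recover the request and invoke its base.complete(). Condensed to its essentials (a sketch reusing the patch's types, not the driver's exact code):

/* Sketch: reserve an id and stamp it into the SQE before hisi_qp_send(). */
static int demo_alloc_and_tag(struct hisi_sec_ctx *ctx,
			      struct hisi_sec_req *req)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = find_first_zero_bit(ctx->req_bitmap, QM_Q_DEPTH);
	if (id >= QM_Q_DEPTH) {
		spin_unlock_irqrestore(&ctx->req_lock, flags);
		return -EBUSY;		/* queue full */
	}
	set_bit(id, ctx->req_bitmap);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	ctx->req_list[id] = req;
	req->req_id = id;
	req->sec_sqe.type2.tag = id;	/* travels to hw and back */
	return 0;
}

/* Sketch of the completion side: the hardware echoes the tag in the
 * response BD. The real callback also updates the IV, unmaps the SGLs,
 * frees the IV buffer, and recycles the id via hisi_sec_free_req_id(). */
static void demo_complete(struct hisi_sec_ctx *ctx,
			  struct hisi_sec_sqe *resp, int err)
{
	struct hisi_sec_req *req = ctx->req_list[resp->type2.tag];

	req->sk_req->base.complete(&req->sk_req->base, err);
}
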
@@ -12,7 +12,7 @@
#ifndef HISI_SEC_CRYPTO_H
#define HISI_SEC_CRYPTO_H
#define SEC_MAX_IV_SIZE 16
#define SEC_IV_SIZE 16
#define SEC_MAX_KEY_SIZE 64
int hisi_sec_register_to_crypto(void);
......
@@ -17,6 +17,8 @@
#define HSEC_VF_NUM 63
#define HSEC_QUEUE_NUM_V1 4096
#define HSEC_QUEUE_NUM_V2 1024
#define PCI_DEVICE_ID_SEC_PF 0xa255
#define PCI_DEVICE_ID_SEC_VF 0xa256
#define HSEC_COMMON_REG_OFF 0x1000
@@ -39,18 +41,19 @@
#define HSEC_MASTER_TRANS_RETURN 0x300150
#define MASTER_TRANS_RETURN_RW 0x3
#define HSEC_CORE_INT_SOURCE 0x3010A0
#define HSEC_CORE_INT_MASK 0x3010A4
#define HSEC_CORE_INT_STATUS 0x3010AC
#define HSEC_CORE_INT_SOURCE 0x301010
#define HSEC_CORE_INT_MASK 0x301000
#define HSEC_CORE_INT_STATUS 0x301008
#define HSEC_CORE_INT_STATUS_M_ECC BIT(1)
#define HSEC_CORE_SRAM_ECC_ERR_INFO 0x301148
#define SRAM_ECC_ERR_NUM_SHIFT 16
#define SRAM_ECC_ERR_ADDR_SHIFT 24
#define HSEC_CORE_INT_DISABLE 0x000007FF
#define HSEC_COMP_CORE_NUM 2
#define HSEC_DECOMP_CORE_NUM 6
#define HSEC_CORE_NUM (HSEC_COMP_CORE_NUM + \
HSEC_DECOMP_CORE_NUM)
#define HSEC_CORE_INT_DISABLE 0x000001FF
#define HSEC_SM4_CTR_ENABLE_REG 0X301380
#define HSEC_SM4_CTR_ENABLE_MSK 0XEFFFFFFF
#define HSEC_SM4_CTR_DISABLE_MSK 0XFFFFFFFF
#define HSEC_SQE_SIZE 128
#define HSEC_SQ_SIZE (HSEC_SQE_SIZE * QM_Q_DEPTH)
#define HSEC_PF_DEF_Q_NUM 64
@@ -324,57 +327,33 @@ struct hisi_sec_ctrl {
struct ctrl_debug_file files[HSEC_DEBUG_FILE_NUM];
};
enum {
HSEC_COMP_CORE0,
HSEC_COMP_CORE1,
HSEC_DECOMP_CORE0,
HSEC_DECOMP_CORE1,
HSEC_DECOMP_CORE2,
HSEC_DECOMP_CORE3,
HSEC_DECOMP_CORE4,
HSEC_DECOMP_CORE5,
};
static const u64 core_offsets[] = {
[HSEC_COMP_CORE0] = 0x302000,
[HSEC_COMP_CORE1] = 0x303000,
[HSEC_DECOMP_CORE0] = 0x304000,
[HSEC_DECOMP_CORE1] = 0x305000,
[HSEC_DECOMP_CORE2] = 0x306000,
[HSEC_DECOMP_CORE3] = 0x307000,
[HSEC_DECOMP_CORE4] = 0x308000,
[HSEC_DECOMP_CORE5] = 0x309000,
};
static struct debugfs_reg32 hsec_dfx_regs[] = {
{"HSEC_GET_BD_NUM ", 0x00ull},
{"HSEC_GET_RIGHT_BD ", 0x04ull},
{"HSEC_GET_ERROR_BD ", 0x08ull},
{"HSEC_DONE_BD_NUM ", 0x0cull},
{"HSEC_WORK_CYCLE ", 0x10ull},
{"HSEC_IDLE_CYCLE ", 0x18ull},
{"HSEC_MAX_DELAY ", 0x20ull},
{"HSEC_MIN_DELAY ", 0x24ull},
{"HSEC_AVG_DELAY ", 0x28ull},
{"HSEC_MEM_VISIBLE_DATA ", 0x30ull},
{"HSEC_MEM_VISIBLE_ADDR ", 0x34ull},
{"HSEC_COMSUMED_BYTE ", 0x38ull},
{"HSEC_PRODUCED_BYTE ", 0x40ull},
{"HSEC_COMP_INF ", 0x70ull},
{"HSEC_PRE_OUT ", 0x78ull},
{"HSEC_BD_RD ", 0x7cull},
{"HSEC_BD_WR ", 0x80ull},
{"HSEC_GET_BD_AXI_ERR_NUM ", 0x84ull},
{"HSEC_GET_BD_PARSE_ERR_NUM ", 0x88ull},
{"HSEC_ADD_BD_AXI_ERR_NUM ", 0x8cull},
{"HSEC_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
{"HSEC_DECOMP_LZ77_CURR_ST ", 0x9cull},
{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"HSEC_BD_LATENCY_MIN ", 0x301600},
{"HSEC_BD_LATENCY_MAX ", 0x301608},
{"HSEC_BD_LATENCY_AVG ", 0x30160C},
{"HSEC_BD_NUM_IN_SAA0 ", 0x301670},
{"HSEC_BD_NUM_IN_SAA1 ", 0x301674},
{"HSEC_BD_NUM_IN_SEC ", 0x301680},
{"HSEC_ECC_1BIT_CNT ", 0x301C00},
{"HSEC_ECC_1BIT_INFO ", 0x301C04},
{"HSEC_ECC_2BIT_CNT ", 0x301C10},
{"HSEC_ECC_2BIT_INFO ", 0x301C14},
{"HSEC_ECC_BD_SAA0 ", 0x301C20},
{"HSEC_ECC_BD_SAA1 ", 0x301C24},
{"HSEC_ECC_BD_SAA2 ", 0x301C28},
{"HSEC_ECC_BD_SAA3 ", 0x301C2C},
{"HSEC_ECC_BD_SAA4 ", 0x301C30},
{"HSEC_ECC_BD_SAA5 ", 0x301C34},
{"HSEC_ECC_BD_SAA6 ", 0x301C38},
{"HSEC_ECC_BD_SAA7 ", 0x301C3C},
{"HSEC_ECC_BD_SAA8 ", 0x301C40},
};
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, 0xa250,
NULL);
struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
PCI_DEVICE_ID_SEC_PF, NULL);
u32 n, q_num;
u8 rev_id;
int ret;
@@ -420,12 +399,14 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
static int uacce_mode = UACCE_MODE_NOUACCE;
module_param(uacce_mode, int, 0444);
static int enable_sm4_ctr;
module_param(enable_sm4_ctr, int, 0444);
static const struct pci_device_id hisi_sec_dev_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa255)},
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa256)},
{0,}
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_SEC_PF) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_SEC_VF) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
static inline void hisi_sec_add_to_list(struct hisi_sec *hisi_sec)
@@ -473,10 +454,8 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
int ret;
u32 reg;
struct hisi_qm *qm = &hisi_sec->qm;
void *base =
qm->io_base + SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF;
pr_info("base[%llx]\n", (u64) base);
void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
SEC_ACC_COMMON_REG_OFF;
writel_relaxed(0x1, base + SEC_MEM_START_INIT_REG);
ret = readl_relaxed_poll_timeout(base +
@@ -491,8 +470,6 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
writel_relaxed(reg, base + SEC_CONTROL_REG);
// todo: JUST SUPPORT SMMU
// if (sec_dev->smmu_normal) {
reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL0_REG);
reg |= SEC_USER0_SMMU_NORMAL;
writel_relaxed(reg, base + SEC_INTERFACE_USER_CTRL0_REG);
@@ -500,20 +477,11 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL1_REG);
reg |= SEC_USER1_SMMU_NORMAL;
writel_relaxed(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
// } else {
// reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL0_REG);
// reg &= ~SEC_USER0_SMMU_NORMAL;
// writel_relaxed(reg, base + SEC_INTERFACE_USER_CTRL0_REG);
// reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL1_REG);
// reg &= ~SEC_USER1_SMMU_NORMAL;
// writel_relaxed(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
// }
writel_relaxed(0xfffff7fd, base + SEC_BD_ERR_CHK_EN_REG(1));
writel_relaxed(0xffffbfff, base + SEC_BD_ERR_CHK_EN_REG(3));
/*enable abnormal int */
/* enable abnormal int */
writel_relaxed(SEC_PF_INT_MSK, base + SEC_PF_ABNORMAL_INT_ENABLE_REG);
writel_relaxed(SEC_RAS_CE_ENB_MSK, base + SEC_RAS_CE_ENABLE_REG);
writel_relaxed(SEC_RAS_FE_ENB_MSK, base + SEC_RAS_FE_ENABLE_REG);
@@ -528,9 +496,23 @@ static int sec_engine_init(struct hisi_sec *hisi_sec)
reg = readl_relaxed(base + SEC_CONTROL_REG);
reg |= sec_get_endian(hisi_sec);
writel_relaxed(reg, base + SEC_CONTROL_REG);
if (enable_sm4_ctr)
writel_relaxed(HSEC_SM4_CTR_ENABLE_MSK,
qm->io_base + HSEC_SM4_CTR_ENABLE_REG);
return 0;
}
static void hisi_sec_disable_sm4_ctr(struct hisi_sec *hisi_sec)
{
struct hisi_qm *qm = &hisi_sec->qm;
if (enable_sm4_ctr)
writel_relaxed(HSEC_SM4_CTR_DISABLE_MSK,
qm->io_base + HSEC_SM4_CTR_ENABLE_REG);
}
static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
{
struct hisi_qm *qm = &hisi_sec->qm;
@@ -558,7 +540,24 @@ static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state)
{
struct hisi_qm *qm = &hisi_sec->qm;
if (qm->ver == QM_HW_V1) {
writel(HSEC_CORE_INT_DISABLE, qm->io_base + HSEC_CORE_INT_MASK);
dev_info(&qm->pdev->dev, "SEC v%d does not support hw error handle\n",
qm->ver);
return;
}
if (state) {
/* enable SEC hw error interrupts */
writel(0, hisi_sec->qm.io_base + HSEC_CORE_INT_MASK);
} else {
/* disable SEC hw error interrupts */
writel(HSEC_CORE_INT_DISABLE,
hisi_sec->qm.io_base + HSEC_CORE_INT_MASK);
}
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -699,13 +698,8 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
char buf[20];
int i;
for (i = 0; i < HSEC_CORE_NUM; i++) {
if (i < HSEC_COMP_CORE_NUM)
sprintf(buf, "comp_core%d", i);
else
sprintf(buf, "decomp_core%d", i - HSEC_COMP_CORE_NUM);
sprintf(buf, "hisi_sec_dfx");
tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
if (!tmp_d)
@@ -717,12 +711,11 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
regset->regs = hsec_dfx_regs;
regset->nregs = ARRAY_SIZE(hsec_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
regset->base = qm->io_base;
tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
if (!tmp)
return -ENOENT;
}
return 0;
}
@@ -763,7 +756,7 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
if (ret)
goto failed_to_create;
if (qm->pdev->device == 0xa250) {
if (qm->pdev->device == PCI_DEVICE_ID_SEC_PF) {
hisi_sec->ctrl->debug_root = dev_d;
ret = hisi_sec_ctrl_debug_init(hisi_sec->ctrl);
if (ret)
@@ -842,6 +835,10 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hisi_sec_add_to_list(hisi_sec);
hisi_sec->sgl_pool = acc_create_sgl_pool(&pdev->dev, "hsec-sgl");
if (!hisi_sec->sgl_pool)
return -ENOMEM;
qm = &hisi_sec->qm;
qm->pdev = pdev;
qm->ver = rev_id;
@@ -1038,8 +1035,10 @@ static void hisi_sec_remove(struct pci_dev *pdev)
hisi_sec_debugfs_exit(hisi_sec);
hisi_qm_stop(qm);
if (qm->fun_type == QM_HW_PF)
if (qm->fun_type == QM_HW_PF) {
hisi_sec_hw_error_set_state(hisi_sec, false);
hisi_sec_disable_sm4_ctr(hisi_sec);
}
hisi_qm_uninit(qm);
hisi_sec_remove_from_list(hisi_sec);
......
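
One detail the sec_engine_init() hunks truncate above: after writing SEC_MEM_START_INIT_REG, the driver polls a status register with readl_relaxed_poll_timeout() from <linux/iopoll.h>. The generic shape of that pattern, with placeholder register offsets, mask, and timeouts rather than the driver's actual values:

#include <linux/io.h>
#include <linux/iopoll.h>

/* Placeholder offsets/mask; illustrative only. */
#define DEMO_MEM_START_INIT_REG	0x0100
#define DEMO_MEM_INIT_DONE_REG	0x0104
#define DEMO_MEM_INIT_DONE_MSK	0x1

static int demo_wait_mem_init(void __iomem *base)
{
	u32 val;

	writel_relaxed(0x1, base + DEMO_MEM_START_INIT_REG);

	/* Poll every 10 us; give up after 1000 us. */
	return readl_relaxed_poll_timeout(base + DEMO_MEM_INIT_DONE_REG,
					  val,
					  val & DEMO_MEM_INIT_DONE_MSK,
					  10, 1000);
}
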
@@ -192,27 +192,4 @@ enum C_ALG {
#define SEC_MAX_SGE_NUM 255
#define SEC_MAX_SGL_NUM 256
struct sgl_entry {
char *buf;
void *pageCtrl;
u32 len;
u32 pad;
u32 pad0;
u32 pad1;
};
struct sgl {
struct sgl *next;
u16 entrySumInChain;
u16 entrySumInSgl;
u16 entryNumInSgl;
u8 pad0[2];
u64 serialNum;
u32 flag;
u32 cpuid;
u8 pad1[8];
u8 reserve[24];
struct sgl_entry entry[SEC_MAX_SGE_NUM];
};
#endif