commit 22dc4b72 authored by tanghui20, committed by Xie XiuQi

WD_HPRE: solve static check problems from wd_hpre

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Solve static check problems reported for wd_hpre: initialize an uninitialized dma_addr_t, access the little-endian SQE fields through le16_to_cpu()/cpu_to_le32() conversions, and split the cluster initialization loop out of hpre_set_user_domain_and_cache() into a new hpre_set_cluster() helper.

Feature or Bugfix: Bugfix
Signed-off-by: tanghui20 <tanghui20@huawei.com>
Reviewed-by: xuzaibo <xuzaibo@huawei.com>
Reviewed-by: Zhou Wang <wangzhou1@hisilicon.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
parent f020f120
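Most of the hunks below replace direct `|=` updates of little-endian SQE words with explicit byte-order conversions, which is what the static checker (sparse) flags. A minimal sketch of the pattern, using placeholder names rather than the driver's real `struct hpre_sqe` layout:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* demo_sqe stands in for the driver's hardware descriptor; the real
 * struct hpre_sqe has more fields. */
struct demo_sqe {
	__le32 dw0;	/* descriptor word, always little-endian on the wire */
};

static void demo_set_alg_flag(struct demo_sqe *sqe, u32 alg_flag)
{
	/*
	 * "sqe->dw0 |= alg_flag;" mixes a __le32 with a CPU-order u32:
	 * sparse reports the type mismatch, and the OR would corrupt the
	 * field on big-endian kernels. Convert to CPU order, set the
	 * flag, then convert back.
	 */
	sqe->dw0 = cpu_to_le32(le32_to_cpu(sqe->dw0) | alg_flag);
}
```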
@@ -236,7 +236,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
 {
 	struct hpre_sqe *msg = &hpre_req->req;
 	struct hpre_ctx *ctx = hpre_req->ctx;
-	dma_addr_t tmp;
+	dma_addr_t tmp = 0;
 	int ret;
 
 	/* when the data is dh's source, we should format it */
@@ -382,7 +382,7 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *_resp)
 	struct hpre_sqe *sqe = _resp;
 	struct hpre_ctx *ctx = qp->qp_ctx;
 
-	ctx->req_list[sqe->tag]->cb(ctx, _resp);
+	ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, _resp);
 }
 
 static int hpre_ctx_init(struct hpre_ctx *ctx)
@@ -470,9 +470,11 @@ static int hpre_dh_compute_value(struct kpp_request *req)
 	if (ret)
 		goto clear_all;
 
 	if (ctx->crt_g2_mode && !req->src)
-		msg->dw0 |= HPRE_ALG_DH_G2;
+		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
+			   | HPRE_ALG_DH_G2);
 	else
-		msg->dw0 |= HPRE_ALG_DH;
+		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
+			   | HPRE_ALG_DH);
 
 	do {
 		ret = hisi_qp_send(ctx->qp, msg);
 	} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
@@ -727,10 +729,12 @@ static int hpre_rsa_dec(struct akcipher_request *req)
 	if (ctx->crt_g2_mode) {
 		msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
-		msg->dw0 |= HPRE_ALG_NC_CRT;
+		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
+			   | HPRE_ALG_NC_CRT);
 	} else {
 		msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
-		msg->dw0 |= HPRE_ALG_NC_NCRT;
+		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
+			   | HPRE_ALG_NC_NCRT);
 	}
 
 	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
@@ -933,7 +937,7 @@ static bool hpre_is_crt_key(struct rsa_key *key)
 	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
 		  key->qinv_sz;
 
-#define LEN_OF_NCRT_PARA 5 //N-CRT less than 5 parameters
+#define LEN_OF_NCRT_PARA 5 // N-CRT less than 5 parameters
 
 	return len > LEN_OF_NCRT_PARA;
 }
...
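The `hpre_alg_cb()` hunk above is the read-side counterpart of the same rule: a tag written back by the device in little-endian order must pass through `le16_to_cpu()` before it is used as an array index. A hedged sketch with hypothetical names (in the driver the tag lives in `struct hpre_sqe`):

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* demo_resp is illustrative only, not the driver's layout. */
struct demo_resp {
	__le16 tag;	/* request tag as written back by the device */
};

/* Return the tag in CPU byte order, safe to use as a req_list index;
 * indexing with the raw __le16 would be wrong on big-endian kernels
 * and is reported by sparse. */
static u16 demo_resp_tag(const struct demo_resp *resp)
{
	return le16_to_cpu(resp->tag);
}
```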
@@ -346,13 +346,40 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)
 	return 0;
 }
 
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_cluster(struct hisi_qm *qm)
 {
-	int ret, i;
-	u32 val;
+	struct device *dev = &qm->pdev->dev;
 	unsigned long offset;
+	u32 val = 0;
+	int ret, i;
+
+	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
+		offset = i * HPRE_CLSTR_ADDR_INTRVL;
+
+		/* clusters initiating */
+		writel(HPRE_CLUSTER_CORE_MASK,
+		       HPRE_ADDR(offset + HPRE_CORE_ENB));
+		writel(0x1, HPRE_ADDR(offset + HPRE_CORE_INI_CFG));
+		ret = readl_relaxed_poll_timeout(HPRE_ADDR(offset +
+					HPRE_CORE_INI_STATUS), val,
+					((val & HPRE_CLUSTER_CORE_MASK) ==
+					HPRE_CLUSTER_CORE_MASK),
+					HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US);
+		if (ret) {
+			dev_err(dev, "cluster %d int st status timeout!\n", i);
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+{
 	struct hisi_qm *qm = &hpre->qm;
 	struct device *dev = &qm->pdev->dev;
+	u32 val;
+	int ret;
 
 	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(QM_ARUSER_M_CFG_ENABLE));
 	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(QM_AWUSER_M_CFG_ENABLE));
@@ -387,24 +414,10 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre)
 		dev_err(dev, "read rd channel timeout fail!\n");
 		return -ETIMEDOUT;
 	}
 
-	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
-		offset = i * HPRE_CLSTR_ADDR_INTRVL;
-
-		/* clusters initiating */
-		writel(HPRE_CLUSTER_CORE_MASK,
-		       HPRE_ADDR(offset + HPRE_CORE_ENB));
-		writel(0x1, HPRE_ADDR(offset + HPRE_CORE_INI_CFG));
-		ret = readl_relaxed_poll_timeout(HPRE_ADDR(offset +
-					HPRE_CORE_INI_STATUS), val,
-					((val & HPRE_CLUSTER_CORE_MASK) ==
-					HPRE_CLUSTER_CORE_MASK),
-					HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US);
-		if (ret) {
-			dev_err(dev,
-				"cluster %d int st status timeout!\n", i);
-			return -ETIMEDOUT;
-		}
-	}
+	ret = hpre_set_cluster(qm);
+	if (ret)
+		return -ETIMEDOUT;
 
 	ret = hpre_cfg_by_dsm(qm);
 	if (ret)
...
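The new `hpre_set_cluster()` helper keeps the driver's existing register-polling idiom from `<linux/iopoll.h>`. A standalone sketch of that idiom, with placeholder constants (the `DEMO_*` names and values are not the driver's):

```c
#include <linux/iopoll.h>
#include <linux/io.h>

#define DEMO_CORE_MASK		0xf	/* placeholder cluster-core mask */
#define DEMO_RD_INTERVAL_US	10	/* placeholder poll interval */
#define DEMO_RD_TIMEOUT_US	1000	/* placeholder poll timeout */

static int demo_wait_cores_ready(void __iomem *status_reg)
{
	u32 val;

	/*
	 * readl_relaxed_poll_timeout() re-reads status_reg into val every
	 * DEMO_RD_INTERVAL_US microseconds until all mask bits are set;
	 * it returns 0 on success, or -ETIMEDOUT if the condition never
	 * holds within DEMO_RD_TIMEOUT_US.
	 */
	return readl_relaxed_poll_timeout(status_reg, val,
					  (val & DEMO_CORE_MASK) ==
					  DEMO_CORE_MASK,
					  DEMO_RD_INTERVAL_US,
					  DEMO_RD_TIMEOUT_US);
}
```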