提交 e43da631 编写于 作者: Y Yu'an Wang 提交者: Yang Yingliang

hpre: add likely and unlikely in result judgement

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

In this patch, we try to integrate some optimization points:
1. We add likely and unlikely in result judgement to improve
performance.
2. Following the hpre module, we remove the device-presence judgement
from the sec/rde modules.
3. We remove invalid blank lines and replace the print interface.
Signed-off-by: Yu'an Wang <wangyuan46@huawei.com>
Reviewed-by: Mingqiang Ling <lingmingqiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 19e74032
......@@ -123,7 +123,7 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx = hpre_req->ctx;
id = hpre_alloc_req_id(ctx);
if (id < 0)
if (unlikely(id < 0))
return -EINVAL;
ctx->req_list[id] = hpre_req;
......@@ -190,7 +190,7 @@ static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
if (dma_mapping_error(dev, *tmp)) {
if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
......@@ -208,11 +208,11 @@ static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
int shift;
shift = ctx->key_sz - len;
if (shift < 0)
if (unlikely(shift < 0))
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
if (!ptr)
if (unlikely(!ptr))
return -ENOMEM;
if (is_src) {
......@@ -241,7 +241,7 @@ static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
else
ret = hpre_prepare_dma_buf(hpre_req, data, len,
is_src, &tmp);
if (ret)
if (unlikely(ret))
return ret;
if (is_src)
......@@ -262,7 +262,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t tmp;
tmp = le64_to_cpu(sqe->in);
if (!tmp)
if (unlikely(!tmp))
return;
if (src) {
......@@ -275,7 +275,7 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}
tmp = le64_to_cpu(sqe->out);
if (!tmp)
if (unlikely(!tmp))
return;
if (req->dst) {
......@@ -308,8 +308,7 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
HREE_SQE_DONE_MASK;
if (err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)
if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
return 0;
return -EINVAL;
......@@ -456,31 +455,29 @@ static int hpre_dh_compute_value(struct kpp_request *req)
int ret;
ret = hpre_msg_request_set(ctx, req, false);
if (ret)
if (unlikely(ret))
return ret;
if (req->src) {
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
if (ret)
if (unlikely(ret))
goto clear_all;
}
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
if (ret)
if (unlikely(ret))
goto clear_all;
if (ctx->crt_g2_mode && !req->src)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
| HPRE_ALG_DH_G2);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
| HPRE_ALG_DH);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
do {
ret = hisi_qp_send(ctx->qp, msg);
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
if (!ret)
if (likely(!ret))
return -EINPROGRESS;
clear_all:
......@@ -669,22 +666,22 @@ static int hpre_rsa_enc(struct akcipher_request *req)
return ret;
}
if (!ctx->rsa.pubkey)
if (unlikely(!ctx->rsa.pubkey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
if (ret)
if (unlikely(ret))
return ret;
msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
if (ret)
if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
if (ret)
if (unlikely(ret))
goto clear_all;
do {
......@@ -692,7 +689,7 @@ static int hpre_rsa_enc(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
if (!ret)
if (likely(!ret))
return -EINPROGRESS;
clear_all:
......@@ -721,29 +718,29 @@ static int hpre_rsa_dec(struct akcipher_request *req)
return ret;
}
if (!ctx->rsa.prikey)
if (unlikely(!ctx->rsa.prikey))
return -EINVAL;
ret = hpre_msg_request_set(ctx, req, true);
if (ret)
if (unlikely(ret))
return ret;
if (ctx->crt_g2_mode) {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
| HPRE_ALG_NC_CRT);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
HPRE_ALG_NC_CRT);
} else {
msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0)
| HPRE_ALG_NC_NCRT);
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
HPRE_ALG_NC_NCRT);
}
ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
if (ret)
if (unlikely(ret))
goto clear_all;
ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
if (ret)
if (unlikely(ret))
goto clear_all;
do {
......@@ -751,7 +748,7 @@ static int hpre_rsa_dec(struct akcipher_request *req)
} while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
if (!ret)
if (likely(!ret))
return -EINPROGRESS;
clear_all:
......@@ -829,17 +826,17 @@ static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
return 0;
}
static int hpre_crt_para_get(char *para, const char *raw,
unsigned int raw_sz, unsigned int para_size)
static int hpre_crt_para_get(char *para, size_t para_sz,
const char *raw, size_t raw_sz)
{
const char *ptr = raw;
size_t len = raw_sz;
hpre_rsa_drop_leading_zeros(&ptr, &len);
if (!len || len > para_size)
if (!len || len > para_sz)
return -EINVAL;
memcpy(para + para_size - len, ptr, len);
memcpy(para + para_sz - len, ptr, len);
return 0;
}
......@@ -857,32 +854,32 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
if (!ctx->rsa.crt_prikey)
return -ENOMEM;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey, rsa_key->dq,
rsa_key->dq_sz, hlf_ksz);
ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
rsa_key->dq, rsa_key->dq_sz);
if (ret)
goto free_key;
offset = hlf_ksz;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
rsa_key->dp_sz, hlf_ksz);
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
rsa_key->dp, rsa_key->dp_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_Q;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
rsa_key->q, rsa_key->q_sz, hlf_ksz);
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
rsa_key->q, rsa_key->q_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_P;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
rsa_key->p, rsa_key->p_sz, hlf_ksz);
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
rsa_key->p, rsa_key->p_sz);
if (ret)
goto free_key;
offset = hlf_ksz * HPRE_CRT_INV;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
rsa_key->qinv, rsa_key->qinv_sz);
if (ret)
goto free_key;
......
......@@ -17,6 +17,7 @@
#define HPRE_DISABLE 0
#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QUEUE_NUM_V1 4096
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_COMM_CNT_CLR_CE 0x0
......@@ -131,18 +132,18 @@ static const char * const hpre_debug_file_name[] = {
};
static const struct hpre_hw_error hpre_hw_errors[] = {
{ .int_msk = BIT(0), .msg = "hpre_ecc_1bitt_err" },
{ .int_msk = BIT(1), .msg = "hpre_ecc_2bit_err" },
{ .int_msk = BIT(2), .msg = "hpre_data_wr_err" },
{ .int_msk = BIT(3), .msg = "hpre_data_rd_err" },
{ .int_msk = BIT(4), .msg = "hpre_bd_rd_err" },
{ .int_msk = BIT(5), .msg = "hpre_ooo_2bit_ecc_err" },
{ .int_msk = BIT(6), .msg = "hpre_cltr1_htbt_tm_out_err" },
{ .int_msk = BIT(7), .msg = "hpre_cltr2_htbt_tm_out_err" },
{ .int_msk = BIT(8), .msg = "hpre_cltr3_htbt_tm_out_err" },
{ .int_msk = BIT(9), .msg = "hpre_cltr4_htbt_tm_out_err" },
{ .int_msk = GENMASK(10, 15), .msg = "hpre_ooo_rdrsp_err" },
{ .int_msk = GENMASK(16, 21), .msg = "hpre_ooo_wrrsp_err" },
{ .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
{ .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
{ .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
{ .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
{ .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
{ .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
{ .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
{ .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
{ .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
{ /* sentinel */ }
};
......@@ -353,7 +354,7 @@ static int hpre_set_cluster(struct hisi_qm *qm)
static int hpre_set_user_domain_and_cache(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct pci_dev *pdev = qm->pdev;
u32 val;
int ret;
......@@ -387,19 +388,19 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre)
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret) {
dev_err(dev, "read rd channel timeout fail!\n");
pci_err(pdev, "read rd channel timeout fail!\n");
return -ETIMEDOUT;
}
ret = hpre_set_cluster(qm);
if (ret) {
dev_err(dev, "set hpre cluster err!\n");
pci_err(pdev, "set hpre cluster err!\n");
return -ETIMEDOUT;
}
ret = hpre_cfg_by_dsm(qm);
if (ret)
dev_err(dev, "acpi_evaluate_dsm err.\n");
pci_err(pdev, "acpi_evaluate_dsm err.\n");
/* disable FLR triggered by BME(bus master enable) */
val = readl(hpre->qm.io_base + QM_PEH_AXUSER_CFG);
......
......@@ -1518,7 +1518,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
atomic_read(&qp->qm->status.flags) == QM_STOP) ||
qp->is_resetting == true) {
qp->is_resetting) {
dev_info_ratelimited(&qp->qm->pdev->dev, "QM resetting...\n");
return -EAGAIN;
}
......@@ -1708,6 +1708,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
qp->qdma.dma, sz);
vma->vm_pgoff = vm_pgoff;
return ret;
default:
return -EINVAL;
......
......@@ -840,31 +840,31 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qm = &hisi_rde->qm;
ret = hisi_rde_qm_pre_init(qm, pdev);
if (ret) {
dev_err(&pdev->dev, "Pre init qm failed!\n");
pci_err(pdev, "Pre init qm failed!\n");
return ret;
}
ret = hisi_qm_init(qm);
if (ret) {
dev_err(&pdev->dev, "Init qm failed!\n");
pci_err(pdev, "Init qm failed!\n");
return ret;
}
ret = hisi_rde_pf_probe_init(hisi_rde);
if (ret) {
dev_err(&pdev->dev, "Init pf failed!\n");
pci_err(pdev, "Init pf failed!\n");
goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
if (ret) {
dev_err(&pdev->dev, "Start qm failed!\n");
pci_err(pdev, "Start qm failed!\n");
goto err_qm_uninit;
}
ret = hisi_rde_debugfs_init(hisi_rde);
if (ret)
dev_warn(&pdev->dev, "Init debugfs failed!\n");
pci_warn(pdev, "Init debugfs failed!\n");
hisi_rde_add_to_list(hisi_rde);
hisi_rde->rde_list_lock = &hisi_rde_list_lock;
......@@ -1234,23 +1234,10 @@ static int __init hisi_rde_init(void)
ret = pci_register_driver(&hisi_rde_pci_driver);
if (ret < 0) {
hisi_rde_unregister_debugfs();
pr_err("Register pci driver failed.\n");
goto err_pci;
}
if (list_empty(&hisi_rde_list)) {
pr_err("No rde device.\n");
ret = -ENODEV;
goto err_probe_device;
}
return 0;
err_probe_device:
pci_unregister_driver(&hisi_rde_pci_driver);
err_pci:
hisi_rde_unregister_debugfs();
return ret;
}
......
......@@ -1771,12 +1771,6 @@ static int __init hisi_sec_init(void)
goto err_pci;
}
if (list_empty(&hisi_sec_list)) {
pr_err("no device!\n");
ret = -ENODEV;
goto err_probe_device;
}
pr_info("hisi_sec: register to crypto\n");
ret = hisi_sec_register_to_crypto(fusion_limit);
if (ret < 0) {
......
......@@ -450,7 +450,7 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
dev_info(&qm->pdev->dev, "ZIP v%d does not support hw error handle\n",
pci_info(qm->pdev, "ZIP v%d cannot support hw error handle!\n",
qm->ver);
return;
}
......@@ -461,7 +461,6 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_NFE_ENB);
val = readl(hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (state) {
/* clear ZIP hw error source if having */
......@@ -876,14 +875,16 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_init(qm);
if (ret) {
dev_err(&pdev->dev, "Failed to init qm!\n");
pci_err(pdev, "Failed to init qm (%d)!\n", ret);
goto err_remove_from_list;
}
if (qm->fun_type == QM_HW_PF) {
ret = hisi_zip_pf_probe_init(hisi_zip);
if (ret)
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_remove_from_list;
}
qm->qp_base = HZIP_PF_DEF_Q_BASE;
qm->qp_num = pf_q_num;
......@@ -908,16 +909,18 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
ret = hisi_qm_start(qm);
if (ret)
if (ret) {
pci_err(pdev, "Failed to start qm (%d)!\n", ret);
goto err_qm_uninit;
}
ret = hisi_zip_debugfs_init(hisi_zip);
if (ret)
dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
pci_err(pdev, "Failed to init debugfs (%d)!\n", ret);
ret = hisi_zip_register_to_crypto();
if (ret < 0) {
pr_err("Failed to register driver to crypto.\n");
pci_err(pdev, "Failed to register driver to crypto!\n");
goto err_qm_stop;
}
return 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册