Commit 8d84475d authored by Zhao Weibo, committed by Yang Yingliang

RDMA/hns: some robust optimize in rdfx

driver inclusion
category: cleanup
bugzilla: NA
CVE: NA

--------------------------------

This patch adds some robustness cleanups to rdfx: read-only pointer
parameters are const-qualified, queue-index masks get explicit unsigned
casts, and a looked-up CQ pointer is initialized to NULL.
Reviewed-by: Hu Chunzhi <huchunzhi@huawei.com>
Signed-off-by: Zhao Weibo <zhaoweibo3@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 6a1b9631
...
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(rdfx_cp_rq_wqe_buf);
 #ifdef CONFIG_KERNEL_419
 static void rdfx_change_sq_buf(const struct ib_send_wr *wr, int atomic_en,
-			       void *dfx_qp_buf, void *dfx_hns_wqe_sge,
+			       void *dfx_qp_buf, const void *dfx_hns_wqe_sge,
 			       struct rdfx_sq_info *sq,
 			       struct hns_roce_dev *hr_dev,
 			       struct hns_roce_qp *qp)
@@ -88,10 +88,12 @@ static void rdfx_change_sq_buf(struct ib_send_wr *wr, int atomic_en,
 	atomic_set(&sq->head, (int)qp->sq.head);
 	sq->head_addr =
-		(u64)get_send_wqe(qp, qp->sq.head & (qp->sq.wqe_cnt - 1));
+		(u64)get_send_wqe(qp, qp->sq.head &
+				  (unsigned int)(qp->sq.wqe_cnt - 1));
 	atomic_set(&sq->tail, (int)qp->sq.tail);
 	sq->tail_addr =
-		(u64)get_send_wqe(qp, qp->sq.tail & (qp->sq.wqe_cnt - 1));
+		(u64)get_send_wqe(qp, qp->sq.tail &
+				  (unsigned int)(qp->sq.wqe_cnt - 1));
 }

 #ifdef CONFIG_KERNEL_419
...
@@ -211,7 +211,7 @@ static int show_cqe(struct rdfx_cq_info *rdfx_cq, int cqe_index)
 	return 0;
 }

-static inline int rdfx_convert_str(char *str, u32 *val)
+static inline int rdfx_convert_str(const char *str, u32 *val)
 {
 	long long convert_val;
@@ -224,7 +224,7 @@ static inline int rdfx_convert_str(char *str, u32 *val)
 	return 0;
 }

-static inline int rdfx_show_qp_wqe(char *sq_rq, void *buf, u32 qpn,
+static inline int rdfx_show_qp_wqe(const char *sq_rq, void *buf, u32 qpn,
 				   struct rdfx_info *rdfx)
 {
 	struct rdfx_qp_info *rdfx_qp;
@@ -401,7 +401,7 @@ static inline int rdfx_show_cq_detail(u32 cqn, struct rdfx_info *rdfx)
 {
 	struct rdfx_cq_info *rdfx_cq = NULL;
 	struct hns_roce_dev *hr_dev;
-	struct hns_roce_cq *cq;
+	struct hns_roce_cq *cq = NULL;

 	hr_dev = (struct hns_roce_dev *)rdfx->priv;
@@ -414,7 +414,7 @@ static inline int rdfx_show_cq_detail(u32 cqn, struct rdfx_info *rdfx)
 		atomic_read(&rdfx->cq.top_cq_index));

 	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
-			       cqn & (hr_dev->caps.num_cqs - 1));
+			       cqn & (u32)(hr_dev->caps.num_cqs - 1));
 	if (cq)
 		pr_info("arm_sn_cnt : 0x%x\n", cq->arm_sn);
...
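Note on the casts above: masking an index with (wqe_cnt - 1) or
(num_cqs - 1) implies the ring sizes are powers of two, so the mask wraps
the free-running index; the patch only makes the unsigned type of that
mask explicit and const-qualifies parameters that are read-only. Below is
a minimal standalone sketch of the same masking pattern; fake_sq and
wqe_index() are hypothetical stand-ins, not the driver's real types.

	/* Illustrative only: not driver code. */
	#include <stdio.h>

	struct fake_sq {
		unsigned int head;	/* free-running producer index */
		int wqe_cnt;		/* ring size, a power of two (e.g. 256) */
	};

	static unsigned int wqe_index(const struct fake_sq *sq)
	{
		/* Same shape as the patched expression: mask cast to unsigned. */
		return sq->head & (unsigned int)(sq->wqe_cnt - 1);
	}

	int main(void)
	{
		struct fake_sq sq = { .head = 300, .wqe_cnt = 256 };

		printf("wqe index = %u\n", wqe_index(&sq));	/* 300 & 255 = 44 */
		return 0;
	}

Because the count is a power of two, the mask is equivalent to
head % wqe_cnt without a division; the explicit cast simply documents
that the mask is evaluated as an unsigned value.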