Commit 0d4fc4e7 authored by oulijun, committed by Xie XiuQi

RDMA/hns: Refactor the code for creating qp

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Package the code that allocates the RQ inline receive buffer in the
hns_roce_create_qp_common() function into a separate helper in order
to reduce the function's complexity.
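
Condensed from the diff below, the open-coded allocation in
hns_roce_create_qp_common() collapses into a pair of helper calls
(surrounding context omitted; the error path is shown after the "..."):

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
	    hns_roce_qp_has_rq(init_attr)) {
		/* new helper wraps the two kcalloc()s and the sg_list setup */
		ret = hns_roce_alloc_recv_inline_buffer(hr_qp, init_attr);
		if (ret) {
			dev_err(dev, "allocate receive inline buffer failed\n");
			goto err_out;
		}
	}
	...
err_alloc_recv_inline_buffer:
	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
	    hns_roce_qp_has_rq(init_attr))
		hns_roce_free_recv_inline_buffer(hr_qp);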

Feature or Bugfix: Bugfix
Signed-off-by: oulijun <oulijun@huawei.com>
Reviewed-by: liuyixian <liuyixian@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 7c3a284f
@@ -714,6 +714,55 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
 	return 1;
 }
 
+static int hns_roce_alloc_recv_inline_buffer(struct hns_roce_qp *hr_qp,
+					     struct ib_qp_init_attr *init_attr)
+{
+	int ret;
+	int i;
+
+	/* allocate recv inline buf */
+	hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
+					     sizeof(struct hns_roce_rinl_wqe),
+					     GFP_KERNEL);
+	if (!hr_qp->rq_inl_buf.wqe_list) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
+
+	/* Firstly, allocate a list of sge space buffer */
+	hr_qp->rq_inl_buf.wqe_list[0].sg_list =
+				kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
+					init_attr->cap.max_recv_sge *
+					sizeof(struct hns_roce_rinl_sge),
+					GFP_KERNEL);
+	if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
+		ret = -ENOMEM;
+		goto err_wqe_list;
+	}
+
+	for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
+		/* Secondly, reallocate the buffer */
+		hr_qp->rq_inl_buf.wqe_list[i].sg_list =
+			&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
+				init_attr->cap.max_recv_sge];
+
+	return 0;
+
+err_wqe_list:
+	kfree(hr_qp->rq_inl_buf.wqe_list);
+
+err:
+	return ret;
+}
+
+static void hns_roce_free_recv_inline_buffer(struct hns_roce_qp *hr_qp)
+{
+	kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+	kfree(hr_qp->rq_inl_buf.wqe_list);
+}
+
 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 				     struct ib_pd *ib_pd,
 				     struct ib_qp_init_attr *init_attr,
@@ -754,33 +803,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 
 	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
 	    hns_roce_qp_has_rq(init_attr)) {
-		/* allocate recv inline buf */
-		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
-					     sizeof(struct hns_roce_rinl_wqe),
-					     GFP_KERNEL);
-		if (!hr_qp->rq_inl_buf.wqe_list) {
-			ret = -ENOMEM;
+		ret = hns_roce_alloc_recv_inline_buffer(hr_qp, init_attr);
+		if (ret) {
+			dev_err(dev, "allocate receive inline buffer failed\n");
 			goto err_out;
 		}
-
-		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
-
-		/* Firstly, allocate a list of sge space buffer */
-		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
-					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
-						init_attr->cap.max_recv_sge *
-						sizeof(struct hns_roce_rinl_sge),
-						GFP_KERNEL);
-		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
-			ret = -ENOMEM;
-			goto err_wqe_list;
-		}
-
-		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
-			/* Secondly, reallocate the buffer */
-			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
-				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
-					init_attr->cap.max_recv_sge];
 	}
 
 	page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
@@ -788,14 +815,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
 			dev_err(dev, "ib_copy_from_udata error for create qp\n");
 			ret = -EFAULT;
-			goto err_rq_sge_list;
+			goto err_alloc_recv_inline_buffer;
 		}
 
 		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
 						&ucmd);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
-			goto err_rq_sge_list;
+			goto err_alloc_recv_inline_buffer;
 		}
 
 		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
@@ -804,7 +831,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		if (IS_ERR(hr_qp->umem)) {
 			dev_err(dev, "ib_umem_get error for create qp\n");
 			ret = PTR_ERR(hr_qp->umem);
-			goto err_rq_sge_list;
+			goto err_alloc_recv_inline_buffer;
 		}
 		hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
 				hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
@@ -868,14 +895,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			dev_err(dev, "init_attr->create_flags error(%d)!\n",
 				init_attr->create_flags);
 			ret = -EINVAL;
-			goto err_rq_sge_list;
+			goto err_alloc_recv_inline_buffer;
 		}
 
 		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
 			dev_err(dev, "init_attr->create_flags error(%d)!\n",
 				init_attr->create_flags);
 			ret = -EINVAL;
-			goto err_rq_sge_list;
+			goto err_alloc_recv_inline_buffer;
 		}
 
 		/* Set SQ size */
@@ -883,7 +910,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 						  hr_qp);
 		if (ret) {
 			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
-			goto err_rq_sge_list;
+			goto err_alloc_recv_inline_buffer;
 		}
 
 		/* QP doorbell register address */
@@ -897,7 +924,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
 			if (ret) {
 				dev_err(dev, "rq record doorbell alloc failed!\n");
-				goto err_rq_sge_list;
+				goto err_alloc_recv_inline_buffer;
 			}
 			*hr_qp->rdb.db_record = 0;
 			hr_qp->rdb_en = 1;
@@ -1058,15 +1085,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
 		hns_roce_free_db(hr_dev, &hr_qp->rdb);
 
-err_rq_sge_list:
+err_alloc_recv_inline_buffer:
 	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
 	    hns_roce_qp_has_rq(init_attr))
-		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
-
-err_wqe_list:
-	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
-	    hns_roce_qp_has_rq(init_attr))
-		kfree(hr_qp->rq_inl_buf.wqe_list);
+		hns_roce_free_recv_inline_buffer(hr_qp);
 
 err_out:
 	return ret;