Commit ae2854c5 authored by Yixing Liu, committed by Jason Gunthorpe

RDMA/hns: Encapsulate the qp db as a function

Encapsulate the QP doorbell allocation into two functions: one for user QPs and one for kernel QPs.

Link: https://lore.kernel.org/r/1629985056-57004-7-git-send-email-liangwenpeng@huawei.com
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Parent 7fac7169
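
At a glance, the resulting structure is sketched below: alloc_qp_db() keeps only the handling common to both paths and dispatches to a user-path or kernel-path helper depending on whether udata is present. This is a condensed view of the diff that follows (the helper bodies and their error handling are elided), not a standalone buildable unit, since it relies on the hns RoCE driver's types and constants.

static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_qp_init_attr *init_attr,
                       struct ib_udata *udata,
                       struct hns_roce_ib_create_qp *ucmd,
                       struct hns_roce_ib_create_qp_resp *resp)
{
        int ret;

        /* Common to both paths: set the owner-doorbell flag in SDI mode. */
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
                hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

        /* udata is only supplied for QPs created from user space. */
        if (udata)
                ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata,
                                       ucmd, resp);
        else
                ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);

        return ret;
}

Compared with the old alloc_qp_db(), the user and kernel branches no longer share one error path, so each helper cleans up only what it allocated.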
@@ -823,24 +823,20 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
                 hns_roce_qp_has_rq(init_attr));
 }
 
-static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+                            struct hns_roce_qp *hr_qp,
                             struct ib_qp_init_attr *init_attr,
                             struct ib_udata *udata,
                             struct hns_roce_ib_create_qp *ucmd,
                             struct hns_roce_ib_create_qp_resp *resp)
 {
-        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
-                udata, struct hns_roce_ucontext, ibucontext);
+        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+                struct hns_roce_ucontext, ibucontext);
         struct ib_device *ibdev = &hr_dev->ib_dev;
         int ret;
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
-                hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
-
-        if (udata) {
-                if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
-                        ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr,
-                                                   &hr_qp->sdb);
+        if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+                ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
                 if (ret) {
                         ibdev_err(ibdev,
                                   "failed to map user SQ doorbell, ret = %d.\n",
@@ -851,8 +847,7 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
         }
 
         if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
-                ret = hns_roce_db_map_user(uctx, ucmd->db_addr,
-                                           &hr_qp->rdb);
+                ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
                 if (ret) {
                         ibdev_err(ibdev,
                                   "failed to map user RQ doorbell, ret = %d.\n",
@@ -861,13 +856,28 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                 }
                 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
         }
-        } else {
+
+        return 0;
+
+err_sdb:
+        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+                hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_out:
+        return ret;
+}
+
+static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
+                              struct hns_roce_qp *hr_qp,
+                              struct ib_qp_init_attr *init_attr)
+{
+        struct ib_device *ibdev = &hr_dev->ib_dev;
+        int ret;
+
         if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                 hr_qp->sq.db_reg = hr_dev->mem_base +
                                    HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
         else
-                hr_qp->sq.db_reg =
-                        hr_dev->reg_base + hr_dev->sdb_offset +
+                hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
                                    DB_REG_OFFSET * hr_dev->priv_uar.index;
 
         hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
@@ -879,19 +889,38 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                         ibdev_err(ibdev,
                                   "failed to alloc kernel RQ doorbell, ret = %d.\n",
                                   ret);
-                        goto err_out;
+                        return ret;
                 }
                 *hr_qp->rdb.db_record = 0;
                 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
         }
-        }
 
         return 0;
-err_sdb:
-        if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
-                hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
-err_out:
-        return ret;
 }
 
+static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+                       struct ib_qp_init_attr *init_attr,
+                       struct ib_udata *udata,
+                       struct hns_roce_ib_create_qp *ucmd,
+                       struct hns_roce_ib_create_qp_resp *resp)
+{
+        int ret;
+
+        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
+                hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
+
+        if (udata) {
+                ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
+                                       resp);
+                if (ret)
+                        return ret;
+        } else {
+                ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
...