Commit 60d36428 authored by Xi Wang, committed by Yang Yingliang

RDMA/hns: Optimize qp destroy flow

mainline inclusion
from mainline-v5.7
commit e365b26c
category: bugfix
bugzilla: NA
CVE: NA

Wrap the duplicate code in the hip08 and hip06 qp destruction process as
hns_roce_qp_destroy() to simplify the qp destroy flow.

Link: https://lore.kernel.org/r/1582526258-13825-2-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Shunfeng Yang <yangshunfeng2@huawei.com>
Reviewed-by: chunzhi hu <huchunzhi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent ee7c8690
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -1471,9 +1471,7 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
-void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
-			       int cnt);
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
 __be32 send_ieth(struct ib_send_wr *wr);
 int to_hr_qp_type(int qp_type);
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -3665,30 +3665,10 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
 		if (send_cq && send_cq != recv_cq)
 			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
 	}
 	hns_roce_unlock_cqs(send_cq, recv_cq);
 
 	hns_roce_qp_remove(hr_dev, hr_qp);
-	hns_roce_qp_free(hr_dev, hr_qp);
-
-	/* RC QP, release QPN */
-	if (hr_qp->ibqp.qp_type == IB_QPT_RC)
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
-	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
-
-	if (is_user)
-		ib_umem_release(hr_qp->umem);
-	else {
-		kfree(hr_qp->sq.wrid);
-		kfree(hr_qp->rq.wrid);
-
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-	}
-
-	if (hr_qp->ibqp.qp_type == IB_QPT_RC)
-		kfree(hr_qp);
-	else
-		kfree(hr_qp);
+	hns_roce_qp_destroy(hr_dev, hr_qp);
 
 	return 0;
 }
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5499,41 +5499,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
 	hns_roce_unlock_cqs(send_cq, recv_cq);
 	spin_unlock_irqrestore(&hr_dev->qp_lock, flags);
 
-	hns_roce_qp_free(hr_dev, hr_qp);
-
-	/* Not special_QP, free their QPN */
-	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
-	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
-	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-
-	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-
-	if (is_user) {
-		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
-			hns_roce_db_unmap_user(
-				to_hr_ucontext(hr_qp->ibqp.uobject->context),
-				&hr_qp->sdb);
-
-		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
-			hns_roce_db_unmap_user(
-				to_hr_ucontext(hr_qp->ibqp.uobject->context),
-				&hr_qp->rdb);
-		ib_umem_release(hr_qp->umem);
-	} else {
-		kfree(hr_qp->sq.wrid);
-		kfree(hr_qp->rq.wrid);
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
-			hns_roce_free_db(hr_dev, &hr_qp->rdb);
-	}
-
-	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
-	    hr_qp->rq.wqe_cnt) {
-		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
-		kfree(hr_qp->rq_inl_buf.wqe_list);
-	}
-
 	return ret;
 }
@@ -5553,7 +5518,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
 		dev_err(hr_dev->dev, "Destroy qp 0x%06lx failed(%d)\n",
 			hr_qp->qpn, ret);
 
-	kfree(hr_qp);
+	hns_roce_qp_destroy(hr_dev, hr_qp);
 
 	return 0;
 }
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1189,6 +1189,44 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
+void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+{
+	hns_roce_qp_free(hr_dev, hr_qp);
+
+	/* Not special_QP, free their QPN */
+	if (hr_qp->ibqp.qp_type != IB_QPT_GSI)
+		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
+
+	if (hr_qp->ibqp.pd->uobject) {
+		struct hns_roce_ucontext *context =
+			to_hr_ucontext(hr_qp->ibqp.pd->uobject->context);
+
+		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
+			hns_roce_db_unmap_user(context, &hr_qp->sdb);
+
+		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
+			hns_roce_db_unmap_user(context, &hr_qp->rdb);
+	} else {
+		kfree(hr_qp->sq.wrid);
+		kfree(hr_qp->rq.wrid);
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+		if (hr_qp->rq.wqe_cnt)
+			hns_roce_free_db(hr_dev, &hr_qp->rdb);
+	}
+	ib_umem_release(hr_qp->umem);
+
+	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
+	    hr_qp->rq.wqe_cnt) {
+		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+		kfree(hr_qp->rq_inl_buf.wqe_list);
+	}
+
+	kfree(hr_qp);
+}
+EXPORT_SYMBOL_GPL(hns_roce_qp_destroy);
+
 struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
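A note on the pattern shown above: both hardware layers previously open-coded the same teardown sequence, and the patch funnels them into one common helper. The QPN-release condition also changes shape, from listing the non-special types (RC/UC/UD) to excluding the one special type (GSI), which covers the same cases for both layers. Below is a minimal, self-contained C sketch of this "extract a common destroy helper" shape; the types and names are hypothetical stand-ins for illustration, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver objects (illustration only). */
enum qp_type { QPT_RC, QPT_UC, QPT_UD, QPT_GSI };

struct qp {
	enum qp_type type;
	int qpn;
	char *buf;	/* stands in for WQE buffers, doorbells, MTR, ... */
};

/* Common teardown, in the spirit of hns_roce_qp_destroy(): every
 * hardware revision calls this instead of duplicating the steps. */
static void qp_destroy(struct qp *qp)
{
	/* Not a special QP (GSI): return its QPN to the allocator. */
	if (qp->type != QPT_GSI)
		printf("release qpn %d\n", qp->qpn);

	free(qp->buf);	/* buffer/doorbell cleanup */
	free(qp);	/* the final kfree(hr_qp) equivalent */
}

/* Per-revision destroy paths keep only their HW-specific work. */
static int v1_destroy(struct qp *qp)
{
	/* ... v1-specific: modify QP to RESET, clean CQs ... */
	qp_destroy(qp);
	return 0;
}

static int v2_destroy(struct qp *qp)
{
	/* ... v2-specific: modify QP to RESET, clean CQs ... */
	qp_destroy(qp);
	return 0;
}

int main(void)
{
	struct qp *a = calloc(1, sizeof(*a));
	struct qp *b = calloc(1, sizeof(*b));

	a->type = QPT_RC;  a->qpn = 42; a->buf = malloc(64);
	b->type = QPT_GSI; b->qpn = 1;  b->buf = malloc(64);

	return v1_destroy(a) | v2_destroy(b);
}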