Commit d0b40fc5 authored by Luoyouming, committed by Zheng Zengkai

RDMA/hns: Remove enable rq inline in kernel and add compatibility handling

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5USIG

----------------------------------------------------------

RQ inline is only used in userspace, and it should be enabled during the
transition to the RTR state. Add compatibility handling between different
userspace and kernel versions.
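
In outline, the compatibility handling works as follows: userspace requests
RQ inline by setting HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS in the config field
of the alloc_ucontext command; the kernel records the request in the ucontext
and, if the hardware reports HNS_ROCE_CAP_FLAG_RQ_INLINE, echoes the flag back
in the response; QPC_RQIE is then written only on the INIT-to-RTR transition,
and only for userspace RC QPs whose ucontext negotiated the flag. The snippet
below is a minimal, standalone C sketch of that negotiation logic, not the
driver code itself; only the two flag values are taken from this patch, and
the helper functions and main() are illustrative.

/*
 * Minimal sketch of the negotiation (illustrative only, not driver code).
 * The two flag values below are the ones used by this patch; the helper
 * functions and main() are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define HNS_ROCE_CAP_FLAG_RQ_INLINE         (1U << 20) /* kernel HW capability */
#define HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS (1U << 1)  /* ucontext config bit */

/* alloc_ucontext: remember what userspace asked for, report what HW supports */
static unsigned int negotiate_rq_inline(unsigned int ucmd_config,
					unsigned int caps_flags,
					unsigned int *resp_config)
{
	unsigned int uctx_config = ucmd_config & HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS;

	if (caps_flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		*resp_config |= HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS;

	return uctx_config;
}

/*
 * INIT -> RTR: QPC_RQIE ends up set only for a userspace RC QP whose
 * ucontext negotiated the flag and whose hardware reports the capability.
 */
static bool rqie_enabled(bool is_user_qp, bool is_rc_qp,
			 unsigned int uctx_config, unsigned int caps_flags)
{
	return is_user_qp && is_rc_qp &&
	       (uctx_config & HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS) &&
	       (caps_flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
}

int main(void)
{
	unsigned int caps = HNS_ROCE_CAP_FLAG_RQ_INLINE;
	unsigned int resp = 0;
	unsigned int uctx = negotiate_rq_inline(HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS,
						caps, &resp);

	printf("resp reports RQ inline: %d\n",
	       !!(resp & HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS));
	printf("QPC_RQIE set at RTR:    %d\n",
	       rqie_enabled(true, true, uctx, caps));
	return 0;
}

Note also the design choice visible in the diff: the old capability bit
(BIT(2)) is not reused but kept as HNS_ROCE_CAP_FLAG_DISCARD, and the RQ
inline capability is re-reported at BIT(20), so older userspace that still
interprets BIT(2) as RQ inline support is not misled.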
Signed-off-by: Luoyouming <luoyouming@huawei.com>
Reviewed-by: Yangyang Li <liyangyang20@huawei.com>
Reviewed-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 34542349
@@ -132,7 +132,8 @@ enum hns_roce_event {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
/* discard this bit, reserved for compatibility */
HNS_ROCE_CAP_FLAG_DISCARD = BIT(2),
HNS_ROCE_CAP_FLAG_CQ_RECORD_DB = BIT(3),
HNS_ROCE_CAP_FLAG_QP_RECORD_DB = BIT(4),
HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
@@ -144,6 +145,7 @@ enum {
HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(20),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
@@ -202,7 +204,7 @@ struct hns_roce_ucontext {
struct list_head page_list;
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
u32 config;
u32 config;
};
struct hns_roce_pd {
@@ -889,7 +891,7 @@ struct hns_roce_hw {
u32 step_idx);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
enum ib_qp_state new_state, struct ib_udata *udata);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
void (*dereg_mr)(struct hns_roce_dev *hr_dev);
@@ -2801,7 +2801,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
attr->port_num = 1;
attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
IB_QPS_INIT);
IB_QPS_INIT, NULL);
if (ret) {
ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
ret);
@@ -2823,7 +2823,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
IB_QPS_RTR);
IB_QPS_RTR, NULL);
hr_dev->loop_idc = loopback;
if (ret) {
ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
@@ -2838,7 +2838,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
IB_QPS_RTS);
IB_QPS_RTS, NULL);
if (ret)
ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
ret);
@@ -4404,10 +4404,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
upper_32_bits(hr_qp->rdb.dma));
if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
hr_reg_write_bool(context, QPC_RQIE,
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
if (ibqp->srq) {
@@ -4598,8 +4594,11 @@ static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
struct hns_roce_v2_qp_context *qpc_mask,
struct ib_udata *udata)
{
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -4719,6 +4718,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_write(context, QPC_LP_SGEN_INI, 3);
hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
if (udata && (ibqp->qp_type == IB_QPT_RC) &&
(uctx->config & HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS)) {
hr_reg_write_bool(context, QPC_RQIE,
hr_dev->caps.flags &
HNS_ROCE_CAP_FLAG_RQ_INLINE);
hr_reg_clear(qpc_mask, QPC_RQIE);
}
return 0;
}
@@ -5066,7 +5073,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
struct hns_roce_v2_qp_context *qpc_mask,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
@@ -5085,7 +5093,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
qpc_mask, udata);
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -5265,7 +5273,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
enum ib_qp_state new_state, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5285,7 +5293,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
new_state, context, qpc_mask);
new_state, context, qpc_mask, udata);
if (ret)
goto out;
@@ -5486,7 +5494,7 @@ int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
if (modify_qp_is_ok(hr_qp)) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
hr_qp->state, IB_QPS_RESET, udata);
if (ret)
ibdev_err(ibdev,
"failed to modify QP to RST, ret = %d.\n",
@@ -379,6 +379,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
resp.config = HNS_ROCE_UCONTEXT_EXSGE_CALC_MODE;
resp.max_inline_data = hr_dev->caps.max_sq_inline;
}
context->config |= ucmd.config & HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
resp.config |= HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS;
}
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
@@ -1446,7 +1446,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
new_state);
new_state, udata);
out:
mutex_unlock(&hr_qp->mutex);
@@ -87,6 +87,7 @@ struct hns_roce_ib_create_qp_resp {
enum {
HNS_ROCE_UCONTEXT_EXSGE_CALC_MODE = 1 << 0,
HNS_ROCE_ALLOC_UCTX_RQ_INLINE_FLAGS = 1 << 1,
};
struct hns_roce_ib_alloc_ucontext_resp {