提交 6e8484c5 编写于 作者: M Max Gurtovoy 提交者: Doug Ledford

RDMA/mlx5: set UMR wqe fence according to HCA cap

Cache the needed umr_fence and set the wqe ctrl segment
accordingly.
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Acked-by: Leon Romanovsky <leon@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Doug Ledford <dledford@redhat.com>
上级 1410a90a
...@@ -2979,6 +2979,18 @@ static int create_umr_res(struct mlx5_ib_dev *dev) ...@@ -2979,6 +2979,18 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
return ret; return ret;
} }
/*
 * Translate the HCA's reported UMR fence capability into the fence mode
 * value programmed into UMR WQE control segments.
 *
 * Any capability value other than NONE or SMALL falls through to strong
 * ordering.
 */
static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
{
	if (umr_fence_cap == MLX5_CAP_UMR_FENCE_NONE)
		return MLX5_FENCE_MODE_NONE;

	if (umr_fence_cap == MLX5_CAP_UMR_FENCE_SMALL)
		return MLX5_FENCE_MODE_INITIATOR_SMALL;

	return MLX5_FENCE_MODE_STRONG_ORDERING;
}
static int create_dev_resources(struct mlx5_ib_resources *devr) static int create_dev_resources(struct mlx5_ib_resources *devr)
{ {
struct ib_srq_init_attr attr; struct ib_srq_init_attr attr;
...@@ -3693,6 +3705,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) ...@@ -3693,6 +3705,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
mlx5_ib_internal_fill_odp_caps(dev); mlx5_ib_internal_fill_odp_caps(dev);
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
if (MLX5_CAP_GEN(mdev, imaicl)) { if (MLX5_CAP_GEN(mdev, imaicl)) {
dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
......
...@@ -349,7 +349,7 @@ struct mlx5_ib_qp { ...@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
struct mlx5_ib_wq rq; struct mlx5_ib_wq rq;
u8 sq_signal_bits; u8 sq_signal_bits;
u8 fm_cache; u8 next_fence;
struct mlx5_ib_wq sq; struct mlx5_ib_wq sq;
/* serialize qp state modifications /* serialize qp state modifications
...@@ -654,6 +654,7 @@ struct mlx5_ib_dev { ...@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
struct mlx5_ib_port *port; struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg; struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg; struct mlx5_sq_bfreg fp_bfreg;
u8 umr_fence;
}; };
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
......
...@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) ...@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
} }
} }
/*
 * Compute the fence bits for a send WQE's control segment from the
 * cached fence state (@fence) and the work request's flags.
 *
 * A fenced local-invalidate request always gets strong ordering; an
 * already-pending fence combines with IB_SEND_FENCE into the
 * small-and-fence mode; otherwise IB_SEND_FENCE alone selects the
 * plain fence mode, and 0 means no fencing.
 */
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	bool wr_fenced = !!(wr->send_flags & IB_SEND_FENCE);

	if (unlikely(wr->opcode == IB_WR_LOCAL_INV && wr_fenced))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence))
		return wr_fenced ? MLX5_FENCE_MODE_SMALL_AND_FENCE : fence;

	return wr_fenced ? MLX5_FENCE_MODE_FENCE : 0;
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl, struct mlx5_wqe_ctrl_seg **ctrl,
struct ib_send_wr *wr, unsigned *idx, struct ib_send_wr *wr, unsigned *idx,
...@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, ...@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
static void finish_wqe(struct mlx5_ib_qp *qp, static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl, struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id, u8 size, unsigned idx, u64 wr_id,
int nreq, u8 fence, u8 next_fence, int nreq, u8 fence, u32 mlx5_opcode)
u32 mlx5_opcode)
{ {
u8 opmod = 0; u8 opmod = 0;
...@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp, ...@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
mlx5_opcode | ((u32)opmod << 24)); mlx5_opcode | ((u32)opmod << 24));
ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
ctrl->fm_ce_se |= fence; ctrl->fm_ce_se |= fence;
qp->fm_cache = next_fence;
if (unlikely(qp->wq_sig)) if (unlikely(qp->wq_sig))
ctrl->signature = wq_sig(ctrl); ctrl->signature = wq_sig(ctrl);
...@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
fence = qp->fm_cache;
num_sge = wr->num_sge; num_sge = wr->num_sge;
if (unlikely(num_sge > qp->sq.max_gs)) { if (unlikely(num_sge > qp->sq.max_gs)) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
...@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
if (wr->opcode == IB_WR_LOCAL_INV ||
wr->opcode == IB_WR_REG_MR) {
fence = dev->umr_fence;
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
} else if (wr->send_flags & IB_SEND_FENCE) {
if (qp->next_fence)
fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
fence = MLX5_FENCE_MODE_FENCE;
} else {
fence = qp->next_fence;
}
switch (ibqp->qp_type) { switch (ibqp->qp_type) {
case IB_QPT_XRC_INI: case IB_QPT_XRC_INI:
xrc = seg; xrc = seg;
...@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
case IB_WR_LOCAL_INV: case IB_WR_LOCAL_INV:
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
set_linv_wr(qp, &seg, &size); set_linv_wr(qp, &seg, &size);
...@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break; break;
case IB_WR_REG_MR: case IB_WR_REG_MR:
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
qp->sq.wr_data[idx] = IB_WR_REG_MR; qp->sq.wr_data[idx] = IB_WR_REG_MR;
ctrl->imm = cpu_to_be32(reg_wr(wr)->key); ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
err = set_reg_wr(qp, reg_wr(wr), &seg, &size); err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
...@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
nreq, get_fence(fence, wr), fence, MLX5_OPCODE_UMR);
next_fence, MLX5_OPCODE_UMR);
/* /*
* SET_PSV WQEs are not signaled and solicited * SET_PSV WQEs are not signaled and solicited
* on error * on error
...@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
nreq, get_fence(fence, wr), fence, MLX5_OPCODE_SET_PSV);
next_fence, MLX5_OPCODE_SET_PSV);
err = begin_wqe(qp, &seg, &ctrl, wr, err = begin_wqe(qp, &seg, &ctrl, wr,
&idx, &size, nreq); &idx, &size, nreq);
if (err) { if (err) {
...@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
mr->sig->psv_wire.psv_idx, &seg, mr->sig->psv_wire.psv_idx, &seg,
&size); &size);
...@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out; goto out;
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
nreq, get_fence(fence, wr), fence, MLX5_OPCODE_SET_PSV);
next_fence, MLX5_OPCODE_SET_PSV); qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
num_sge = 0; num_sge = 0;
goto skip_psv; goto skip_psv;
...@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
} }
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, qp->next_fence = next_fence;
get_fence(fence, wr), next_fence, finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
mlx5_ib_opcode[wr->opcode]); mlx5_ib_opcode[wr->opcode]);
skip_psv: skip_psv:
if (0) if (0)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册