提交 29af9498 编写于 作者: M Michael Guralnik 提交者: Jason Gunthorpe

IB/mlx5: Remove check of FW capabilities in ODP page fault handling

As page fault handling is initiated by FW, there is no need to check that
the ODP supports the operation and transport.

Link: https://lore.kernel.org/r/20190819120815.21225-3-leon@kernel.org
Signed-off-by: Michael Guralnik <michaelgur@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
上级 00679b63
...@@ -990,17 +990,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev, ...@@ -990,17 +990,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
return ret < 0 ? ret : npages; return ret < 0 ? ret : npages;
} }
/*
 * Lookup table indexed by WQE opcode, giving the per-transport ODP
 * capability bit required to service a page fault for that opcode
 * (checked below via `transport_caps & mlx5_ib_odp_opcode_cap[opcode]`).
 * This patch removes the table: page faults are initiated by FW, so
 * re-validating the capability in the driver is redundant.
 */
static const u32 mlx5_ib_odp_opcode_cap[] = {
	[MLX5_OPCODE_SEND]	       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_IMM]	       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_READ]	       = IB_ODP_SUPPORT_READ,
	[MLX5_OPCODE_ATOMIC_CS]	       = IB_ODP_SUPPORT_ATOMIC,
	[MLX5_OPCODE_ATOMIC_FA]	       = IB_ODP_SUPPORT_ATOMIC,
};
/* /*
* Parse initiator WQE. Advances the wqe pointer to point at the * Parse initiator WQE. Advances the wqe pointer to point at the
* scatter-gather list, and set wqe_end to the end of the WQE. * scatter-gather list, and set wqe_end to the end of the WQE.
...@@ -1011,7 +1000,6 @@ static int mlx5_ib_mr_initiator_pfault_handler( ...@@ -1011,7 +1000,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
{ {
struct mlx5_wqe_ctrl_seg *ctrl = *wqe; struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
u16 wqe_index = pfault->wqe.wqe_index; u16 wqe_index = pfault->wqe.wqe_index;
u32 transport_caps;
struct mlx5_base_av *av; struct mlx5_base_av *av;
unsigned ds, opcode; unsigned ds, opcode;
#if defined(DEBUG) #if defined(DEBUG)
...@@ -1059,29 +1047,8 @@ static int mlx5_ib_mr_initiator_pfault_handler( ...@@ -1059,29 +1047,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_OPCODE_MASK; MLX5_WQE_CTRL_OPCODE_MASK;
switch (qp->ibqp.qp_type) { if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
case IB_QPT_XRC_INI:
*wqe += sizeof(struct mlx5_wqe_xrc_seg); *wqe += sizeof(struct mlx5_wqe_xrc_seg);
transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
break;
case IB_QPT_RC:
transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
break;
case IB_QPT_UD:
transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
break;
default:
mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
qp->ibqp.qp_type);
return -EFAULT;
}
if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
!(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
opcode);
return -EFAULT;
}
if (qp->ibqp.qp_type == IB_QPT_UD) { if (qp->ibqp.qp_type == IB_QPT_UD) {
av = *wqe; av = *wqe;
...@@ -1146,19 +1113,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev, ...@@ -1146,19 +1113,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
return -EFAULT; return -EFAULT;
} }
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
IB_ODP_SUPPORT_RECV))
goto invalid_transport_or_opcode;
break;
default:
invalid_transport_or_opcode:
mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
qp->ibqp.qp_type);
return -EFAULT;
}
*wqe_end = wqe + wqe_size; *wqe_end = wqe + wqe_size;
return 0; return 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册