提交 6f4bc0ea 编写于 作者: Y Yonatan Cohen 提交者: Doug Ledford

IB/mlx5: Allow scatter to CQE without global signaled WRs

Requester scatter to CQE is restricted to QPs configured to signal
all WRs.

This patch adds ability to enable scatter to cqe (force enable)
in the requester without sig_all, for users who do not want all WRs
signaled but rather just the ones whose data is found in the CQE.
Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
上级 2e43bb31
@@ -1706,15 +1706,20 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,

 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
 					 struct ib_qp_init_attr *init_attr,
+					 struct mlx5_ib_create_qp *ucmd,
 					 void *qpc)
 {
 	enum ib_qp_type qpt = init_attr->qp_type;
 	int scqe_sz;
+	bool allow_scat_cqe = 0;

 	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
 		return;

-	if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+	if (ucmd)
+		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
 		return;

 	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
@@ -1836,7 +1841,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
 				      MLX5_QP_FLAG_BFREG_INDEX |
 				      MLX5_QP_FLAG_TYPE_DCT |
-				      MLX5_QP_FLAG_TYPE_DCI))
+				      MLX5_QP_FLAG_TYPE_DCI |
+				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
 			return -EINVAL;

 		err = get_qp_user_index(to_mucontext(pd->uobject->context),
@@ -1971,7 +1977,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
 		configure_responder_scat_cqe(init_attr, qpc);
-		configure_requester_scat_cqe(dev, init_attr, qpc);
+		configure_requester_scat_cqe(dev, init_attr,
+					     (pd && pd->uobject) ? &ucmd : NULL,
+					     qpc);
 	}

 	if (qp->rq.wqe_cnt) {
......
@@ -47,6 +47,7 @@ enum {
 	MLX5_QP_FLAG_TYPE_DCI			= 1 << 5,
 	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC	= 1 << 6,
 	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC	= 1 << 7,
+	MLX5_QP_FLAG_ALLOW_SCATTER_CQE		= 1 << 8,
 };
 enum {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册