Commit ed8637d3 authored by Guy Levi, committed by Doug Ledford

IB/mlx4: Add contig support for control objects

Take advantage of the optimization introduced in the previous commit
("IB/mlx4: Use optimal numbers of MTT entries") to optimize MTT usage
for QP and CQ control objects as well.
Signed-off-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent: 9901abf5
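For orientation, here is a minimal C sketch of the call pattern this commit switches CQ and QP buffer registration to: instead of feeding the umem's raw page count and page shift into mlx4_mtt_init(), the driver first lets mlx4_ib_umem_calc_optimal_mtt_size() collapse physically contiguous pages into larger blocks. The wrapper name register_umem_mtt() and its simplified error handling are illustrative assumptions, not the driver's code; the actual changes live inside mlx4_ib_get_cq_umem() and create_qp_common(), shown in the hunks below.

/*
 * Illustrative sketch only (hypothetical helper, simplified error handling),
 * assuming the mlx4 driver context from the hunks below.
 */
static int register_umem_mtt(struct mlx4_ib_dev *dev, struct ib_umem *umem,
			     struct mlx4_mtt *mtt)
{
	/* Worst case: one MTT entry per page of the user buffer. */
	int n = ib_umem_page_count(umem);
	int shift;
	int err;

	/*
	 * Merge physically contiguous pages into larger blocks where possible;
	 * this may shrink n and return a block shift larger than the page shift.
	 */
	shift = mlx4_ib_umem_calc_optimal_mtt_size(umem, 0, &n);

	err = mlx4_mtt_init(dev->dev, n, shift, mtt);
	if (err)
		return err;

	/* Write the (possibly fewer, larger) translation entries. */
	return mlx4_ib_umem_write_mtt(dev, mtt, umem);
}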
@@ -140,14 +140,18 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
 {
 	int err;
 	int cqe_size = dev->dev->caps.cqe_size;
+	int shift;
+	int n;
 	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
 			    IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
-	err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
-			    (*umem)->page_shift, &buf->mtt);
+	n = ib_umem_page_count(*umem);
+	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
+	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
 	if (err)
 		goto err_buf;
...
@@ -935,5 +935,7 @@ struct ib_rwq_ind_table
 				struct ib_rwq_ind_table_init_attr *init_attr,
 				struct ib_udata *udata);
 int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+				       int *num_of_mtts);
 #endif /* MLX4_IB_H */
@@ -254,8 +254,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
  * middle already handled as part of mtt shift calculation for both their start
  * & end addresses.
  */
-static int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem,
-					      u64 start_va,
-					      int *num_of_mtts)
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+				       int *num_of_mtts)
 {
 	u64 block_shift = MLX4_MAX_MTT_SHIFT;
...
@@ -1038,6 +1038,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			struct mlx4_ib_create_wq wq;
 		} ucmd;
 		size_t copy_len;
+		int shift;
+		int n;
 		copy_len = (src == MLX4_IB_QP_SRC) ?
 			sizeof(struct mlx4_ib_create_qp) :
@@ -1100,8 +1102,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 		}
-		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
-				    qp->umem->page_shift, &qp->mtt);
+		n = ib_umem_page_count(qp->umem);
+		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 		if (err)
 			goto err_buf;
...