commit 72b894b0 authored by Christoph Hellwig, committed by Jason Gunthorpe

IB/umem: remove the dmasync argument to ib_umem_get

The argument is always ignored, so remove it.

Link: https://lore.kernel.org/r/20191113073214.9514-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 7283fff8
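For callers the change is purely mechanical: drop the trailing dmasync flag, which ib_umem_get() never acted on. A minimal sketch (hypothetical driver code, not part of this patch; only the ib_umem_get() signature is taken from it):

#include <rdma/ib_umem.h>

/* Hypothetical helper illustrating the call-site change. */
static struct ib_umem *example_pin_user_buf(struct ib_udata *udata,
					    unsigned long addr, size_t len)
{
	/*
	 * Before this patch the call carried a fifth argument that was
	 * always ignored:
	 *   ib_umem_get(udata, addr, len, IB_ACCESS_LOCAL_WRITE, 0);
	 */
	return ib_umem_get(udata, addr, len, IB_ACCESS_LOCAL_WRITE);
}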
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
-			    size_t size, int access, int dmasync)
+			    size_t size, int access)
 {
 	struct ib_ucontext *context;
 	struct ib_umem *umem;
...
@@ -837,7 +837,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		bytes += (qplib_qp->sq.max_wqe * psn_sz);
 	}
 	bytes = PAGE_ALIGN(bytes);
-	umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+	umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem))
 		return PTR_ERR(umem);
@@ -851,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
 		bytes = PAGE_ALIGN(bytes);
 		umem = ib_umem_get(udata, ureq.qprva, bytes,
-				   IB_ACCESS_LOCAL_WRITE, 1);
+				   IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(umem))
 			goto rqfail;
 		qp->rumem = umem;
@@ -1304,7 +1304,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
 	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
 	bytes = PAGE_ALIGN(bytes);
-	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem))
 		return PTR_ERR(umem);
@@ -2547,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		cq->umem = ib_umem_get(udata, req.cq_va,
 				       entries * sizeof(struct cq_base),
-				       IB_ACCESS_LOCAL_WRITE, 1);
+				       IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(cq->umem)) {
 			rc = PTR_ERR(cq->umem);
 			goto fail;
@@ -3512,7 +3512,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
-	umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+	umem = ib_umem_get(udata, start, length, mr_access_flags);
 	if (IS_ERR(umem)) {
 		dev_err(rdev_to_dev(rdev), "Failed to get umem");
 		rc = -EFAULT;
...
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mhp->rhp = rhp;
 
-	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
+	mhp->umem = ib_umem_get(udata, start, length, acc);
 	if (IS_ERR(mhp->umem))
 		goto err_free_skb;
...
@@ -1371,7 +1371,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 		goto err_out;
 	}
 
-	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+	mr->umem = ib_umem_get(udata, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		ibdev_dbg(&dev->ibdev,
...
@@ -219,7 +219,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
 	u32 npages;
 
 	*umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
-			    IB_ACCESS_LOCAL_WRITE, 1);
+			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
...
@@ -31,7 +31,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 	refcount_set(&page->refcount, 1);
 	page->user_virt = page_addr;
-	page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+	page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
 	if (IS_ERR(page->umem)) {
 		ret = PTR_ERR(page->umem);
 		kfree(page);
...
@@ -1145,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+	mr->umem = ib_umem_get(udata, start, length, access_flags);
 	if (IS_ERR(mr->umem)) {
 		ret = PTR_ERR(mr->umem);
 		goto err_free;
@@ -1230,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
 	}
 	ib_umem_release(mr->umem);
-	mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+	mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
 	if (IS_ERR(mr->umem)) {
 		ret = PTR_ERR(mr->umem);
 		mr->umem = NULL;
...
@@ -745,7 +745,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		}
 
 		hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
-					  hr_qp->buff_size, 0, 0);
+					  hr_qp->buff_size, 0);
 		if (IS_ERR(hr_qp->umem)) {
 			dev_err(dev, "ib_umem_get error for create qp\n");
 			ret = PTR_ERR(hr_qp->umem);
...
@@ -186,7 +186,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
 	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 		return -EFAULT;
 
-	srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+	srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
 	if (IS_ERR(srq->umem))
 		return PTR_ERR(srq->umem);
@@ -206,7 +206,7 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
 	/* config index queue BA */
 	srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
-					srq->idx_que.buf_size, 0, 0);
+					srq->idx_que.buf_size, 0);
 	if (IS_ERR(srq->idx_que.umem)) {
 		dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
 		ret = PTR_ERR(srq->idx_que.umem);
...
@@ -1763,7 +1763,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	if (length > I40IW_MAX_MR_SIZE)
 		return ERR_PTR(-EINVAL);
 
-	region = ib_umem_get(udata, start, length, acc, 0);
+	region = ib_umem_get(udata, start, length, acc);
 	if (IS_ERR(region))
 		return (struct ib_mr *)region;
...
@@ -145,7 +145,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
 	int n;
 
 	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
-			    IB_ACCESS_LOCAL_WRITE, 1);
+			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
...
@@ -64,7 +64,7 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
 	page->user_virt = (virt & PAGE_MASK);
 	page->refcnt    = 0;
-	page->umem      = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+	page->umem      = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
 	if (IS_ERR(page->umem)) {
 		err = PTR_ERR(page->umem);
 		kfree(page);
...
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
 		up_read(&current->mm->mmap_sem);
 	}
 
-	return ib_umem_get(udata, start, length, access_flags, 0);
+	return ib_umem_get(udata, start, length, access_flags);
 }
 
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
...
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		       (qp->sq.wqe_cnt << qp->sq.wqe_shift);
 
-	qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+	qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
 	if (IS_ERR(qp->umem)) {
 		err = PTR_ERR(qp->umem);
 		goto err;
@@ -1110,8 +1110,7 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
 		if (err)
 			goto err;
 
-		qp->umem =
-			ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
+		qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
 		if (IS_ERR(qp->umem)) {
 			err = PTR_ERR(qp->umem);
 			goto err;
...
@@ -110,7 +110,7 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
 		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
 			return -EFAULT;
 
-		srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+		srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
 		if (IS_ERR(srq->umem))
 			return PTR_ERR(srq->umem);
...
@@ -709,7 +709,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	cq->buf.umem =
 		ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
-			    IB_ACCESS_LOCAL_WRITE, 1);
+			    IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(cq->buf.umem)) {
 		err = PTR_ERR(cq->buf.umem);
 		return err;
@@ -1110,7 +1110,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	umem = ib_umem_get(udata, ucmd.buf_addr,
 			   (size_t)ucmd.cqe_size * entries,
-			   IB_ACCESS_LOCAL_WRITE, 1);
+			   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
 		return err;
...
@@ -2134,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 	if (err)
 		return err;
 
-	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
+	obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
 	if (IS_ERR(obj->umem))
 		return PTR_ERR(obj->umem);
...
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
 	page->user_virt = (virt & PAGE_MASK);
 	page->refcnt    = 0;
-	page->umem      = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+	page->umem      = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
 	if (IS_ERR(page->umem)) {
 		err = PTR_ERR(page->umem);
 		kfree(page);
...
@@ -764,7 +764,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 		if (order)
 			*order = ilog2(roundup_pow_of_two(*ncont));
 	} else {
-		u = ib_umem_get(udata, start, length, access_flags, 0);
+		u = ib_umem_get(udata, start, length, access_flags);
 		if (IS_ERR(u)) {
 			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
 			return PTR_ERR(u);
...
@@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 {
 	int err;
 
-	*umem = ib_umem_get(udata, addr, size, 0, 0);
+	*umem = ib_umem_get(udata, addr, size, 0);
 	if (IS_ERR(*umem)) {
 		mlx5_ib_dbg(dev, "umem_get failed\n");
 		return PTR_ERR(*umem);
@@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	if (!ucmd->buf_addr)
 		return -EINVAL;
 
-	rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
+	rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
 	if (IS_ERR(rwq->umem)) {
 		mlx5_ib_dbg(dev, "umem_get failed\n");
 		err = PTR_ERR(rwq->umem);
...
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
 
-	srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+	srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
 	if (IS_ERR(srq->umem)) {
 		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
 		err = PTR_ERR(srq->umem);
...
@@ -880,9 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mr->umem = ib_umem_get(udata, start, length, acc,
-			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+	mr->umem = ib_umem_get(udata, start, length, acc);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err;
...
@@ -869,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(status);
-	mr->umem = ib_umem_get(udata, start, len, acc, 0);
+	mr->umem = ib_umem_get(udata, start, len, acc);
 	if (IS_ERR(mr->umem)) {
 		status = -EFAULT;
 		goto umem_err;
...
@@ -762,7 +762,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
 				       struct qedr_dev *dev,
 				       struct qedr_userq *q, u64 buf_addr,
 				       size_t buf_len, bool requires_db_rec,
-				       int access, int dmasync,
+				       int access,
 				       int alloc_and_init)
 {
 	u32 fw_pages;
@@ -770,7 +770,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
 	q->buf_addr = buf_addr;
 	q->buf_len = buf_len;
-	q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
+	q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
 	if (IS_ERR(q->umem)) {
 		DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
 		       PTR_ERR(q->umem));
@@ -927,9 +927,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		cq->cq_type = QEDR_CQ_TYPE_USER;
 
 		rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
-					  ureq.len, true,
-					  IB_ACCESS_LOCAL_WRITE,
-					  1, 1);
+					  ureq.len, true, IB_ACCESS_LOCAL_WRITE,
+					  1);
 		if (rc)
 			goto err0;
@@ -1401,19 +1400,19 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
 static int qedr_init_srq_user_params(struct ib_udata *udata,
 				     struct qedr_srq *srq,
 				     struct qedr_create_srq_ureq *ureq,
-				     int access, int dmasync)
+				     int access)
 {
 	struct scatterlist *sg;
 	int rc;
 
 	rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
-				  ureq->srq_len, false, access, dmasync, 1);
+				  ureq->srq_len, false, access, 1);
 	if (rc)
 		return rc;
 
 	srq->prod_umem =
 		ib_umem_get(udata, ureq->prod_pair_addr,
-			    sizeof(struct rdma_srq_producers), access, dmasync);
+			    sizeof(struct rdma_srq_producers), access);
 	if (IS_ERR(srq->prod_umem)) {
 		qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
 		ib_umem_release(srq->usrq.umem);
@@ -1510,7 +1509,7 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 		goto err0;
 	}
 
-	rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
+	rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
 	if (rc)
 		goto err0;
@@ -1751,18 +1750,16 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
 		return rc;
 	}
 
-	/* SQ - read access only (0), dma sync not required (0) */
+	/* SQ - read access only (0) */
 	rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
-				  ureq.sq_len, true, 0, 0,
-				  alloc_and_init);
+				  ureq.sq_len, true, 0, alloc_and_init);
 	if (rc)
 		return rc;
 
 	if (!qp->srq) {
-		/* RQ - read access only (0), dma sync not required (0) */
+		/* RQ - read access only (0) */
 		rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
-					  ureq.rq_len, true,
-					  0, 0, alloc_and_init);
+					  ureq.rq_len, true, 0, alloc_and_init);
 		if (rc)
 			return rc;
 	}
@@ -2837,7 +2834,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr->type = QEDR_MR_USER;
 
-	mr->umem = ib_umem_get(udata, start, len, acc, 0);
+	mr->umem = ib_umem_get(udata, start, len, acc);
 	if (IS_ERR(mr->umem)) {
 		rc = -EFAULT;
 		goto err0;
...
@@ -136,7 +136,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		}
 
 		cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
-				       IB_ACCESS_LOCAL_WRITE, 1);
+				       IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(cq->umem)) {
 			ret = PTR_ERR(cq->umem);
 			goto err_cq;
...
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(-EINVAL);
 	}
 
-	umem = ib_umem_get(udata, start, length, access_flags, 0);
+	umem = ib_umem_get(udata, start, length, access_flags);
 	if (IS_ERR(umem)) {
 		dev_warn(&dev->pdev->dev,
 			 "could not get umem for mem region\n");
...
@@ -277,7 +277,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 			if (!is_srq) {
 				/* set qp->sq.wqe_cnt, shift, buf_size.. */
 				qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
-							ucmd.rbuf_size, 0, 0);
+							ucmd.rbuf_size, 0);
 				if (IS_ERR(qp->rumem)) {
 					ret = PTR_ERR(qp->rumem);
 					goto err_qp;
@@ -289,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 			}
 
 			qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
-						ucmd.sbuf_size, 0, 0);
+						ucmd.sbuf_size, 0);
 			if (IS_ERR(qp->sumem)) {
 				if (!is_srq)
 					ib_umem_release(qp->rumem);
...
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 		goto err_srq;
 	}
 
-	srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
+	srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
 	if (IS_ERR(srq->umem)) {
 		ret = PTR_ERR(srq->umem);
 		goto err_srq;
...
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (length == 0)
 		return ERR_PTR(-EINVAL);
 
-	umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+	umem = ib_umem_get(udata, start, length, mr_access_flags);
 	if (IS_ERR(umem))
 		return (void *)umem;
...
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 	void *vaddr;
 	int err;
 
-	umem = ib_umem_get(udata, start, length, access, 0);
+	umem = ib_umem_get(udata, start, length, access);
 	if (IS_ERR(umem)) {
 		pr_warn("err %d from rxe_umem_get\n",
 			(int)PTR_ERR(umem));
...
@@ -70,7 +70,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
-			    size_t size, int access, int dmasync);
+			    size_t size, int access);
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
@@ -85,7 +85,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
 static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
 					  unsigned long addr, size_t size,
-					  int access, int dmasync)
+					  int access)
 {
 	return ERR_PTR(-EINVAL);
 }
...