提交 bdeacabd 编写于 作者: S Shamir Rabinovitch 提交者: Jason Gunthorpe

IB: Remove 'uobject->context' dependency in object destroy APIs

Now that we have the udata passed to all the ib_xxx object destroy APIs
and the additional macro 'rdma_udata_to_drv_context' to get the
ib_ucontext from the ib_udata stored in uverbs_attr_bundle, we can finally
start to remove the drivers' dependency on
ib_xxx->uobject->context.
Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
上级 c4367a26
...@@ -240,7 +240,8 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); ...@@ -240,7 +240,8 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler, void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event); struct ib_event *event);
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why, struct ib_udata *udata); enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
int uverbs_dealloc_mw(struct ib_mw *mw); int uverbs_dealloc_mw(struct ib_mw *mw);
void ib_uverbs_detach_umcast(struct ib_qp *qp, void ib_uverbs_detach_umcast(struct ib_qp *qp,
......
...@@ -670,17 +670,18 @@ static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs) ...@@ -670,17 +670,18 @@ static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs)
} }
int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd, int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
enum rdma_remove_reason why, struct ib_udata *udata) enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
{ {
struct inode *inode; struct inode *inode;
int ret; int ret;
struct ib_uverbs_device *dev = uobject->context->ufile->device; struct ib_uverbs_device *dev = attrs->ufile->device;
inode = xrcd->inode; inode = xrcd->inode;
if (inode && !atomic_dec_and_test(&xrcd->usecnt)) if (inode && !atomic_dec_and_test(&xrcd->usecnt))
return 0; return 0;
ret = ib_dealloc_xrcd(xrcd, udata); ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
if (ib_is_destroy_retryable(ret, why, uobject)) { if (ib_is_destroy_retryable(ret, why, uobject)) {
atomic_inc(&xrcd->usecnt); atomic_inc(&xrcd->usecnt);
......
...@@ -105,7 +105,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject, ...@@ -105,7 +105,7 @@ static int uverbs_free_qp(struct ib_uobject *uobject,
if (uqp->uxrcd) if (uqp->uxrcd)
atomic_dec(&uqp->uxrcd->refcnt); atomic_dec(&uqp->uxrcd->refcnt);
ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent); ib_uverbs_release_uevent(attrs->ufile, &uqp->uevent);
return ret; return ret;
} }
...@@ -138,7 +138,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject, ...@@ -138,7 +138,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
if (ib_is_destroy_retryable(ret, why, uobject)) if (ib_is_destroy_retryable(ret, why, uobject))
return ret; return ret;
ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent); ib_uverbs_release_uevent(attrs->ufile, &uwq->uevent);
return ret; return ret;
} }
...@@ -163,7 +163,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject, ...@@ -163,7 +163,7 @@ static int uverbs_free_srq(struct ib_uobject *uobject,
atomic_dec(&us->uxrcd->refcnt); atomic_dec(&us->uxrcd->refcnt);
} }
ib_uverbs_release_uevent(uobject->context->ufile, uevent); ib_uverbs_release_uevent(attrs->ufile, uevent);
return ret; return ret;
} }
...@@ -180,9 +180,9 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject, ...@@ -180,9 +180,9 @@ static int uverbs_free_xrcd(struct ib_uobject *uobject,
if (ret) if (ret)
return ret; return ret;
mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex); mutex_lock(&attrs->ufile->device->xrcd_tree_mutex);
ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, &attrs->driver_udata); ret = ib_uverbs_dealloc_xrcd(uobject, xrcd, why, attrs);
mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex); mutex_unlock(&attrs->ufile->device->xrcd_tree_mutex);
return ret; return ret;
} }
......
...@@ -49,7 +49,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject, ...@@ -49,7 +49,7 @@ static int uverbs_free_cq(struct ib_uobject *uobject,
return ret; return ret;
ib_uverbs_release_ucq( ib_uverbs_release_ucq(
uobject->context->ufile, attrs->ufile,
ev_queue ? container_of(ev_queue, ev_queue ? container_of(ev_queue,
struct ib_uverbs_completion_event_file, struct ib_uverbs_completion_event_file,
ev_queue) : ev_queue) :
......
...@@ -760,8 +760,8 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) ...@@ -760,8 +760,8 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
atomic_dec(&qhp->refcnt); atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context) ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
: NULL; ibucontext);
cxio_destroy_qp(&rhp->rdev, &qhp->wq, cxio_destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx); ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
......
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
* SOFTWARE. * SOFTWARE.
*/ */
#include <rdma/uverbs_ioctl.h>
#include "iw_cxgb4.h" #include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
...@@ -980,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ...@@ -980,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
atomic_dec(&chp->refcnt); atomic_dec(&chp->refcnt);
wait_event(chp->wait, !atomic_read(&chp->refcnt)); wait_event(chp->wait, !atomic_read(&chp->refcnt));
ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context) ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
: NULL; ibucontext);
destroy_cq(&chp->rhp->rdev, &chp->cq, destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
chp->destroy_skb, chp->wr_waitp); chp->destroy_skb, chp->wr_waitp);
......
...@@ -2838,8 +2838,8 @@ int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) ...@@ -2838,8 +2838,8 @@ int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
pr_debug("%s id %d\n", __func__, srq->wq.qid); pr_debug("%s id %d\n", __func__, srq->wq.qid);
xa_erase_irq(&rhp->qps, srq->wq.qid); xa_erase_irq(&rhp->qps, srq->wq.qid);
ucontext = ibsrq->uobject ? ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
to_c4iw_ucontext(ibsrq->uobject->context) : NULL; ibucontext);
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp); srq->wr_waitp);
c4iw_free_srq_idx(&rhp->rdev, srq->idx); c4iw_free_srq_idx(&rhp->rdev, srq->idx);
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h" #include "hns_roce_device.h"
#include "hns_roce_cmd.h" #include "hns_roce_cmd.h"
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
...@@ -456,12 +457,15 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ...@@ -456,12 +457,15 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
hns_roce_free_cq(hr_dev, hr_cq); hns_roce_free_cq(hr_dev, hr_cq);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt); hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (ib_cq->uobject) { if (udata) {
ib_umem_release(hr_cq->umem); ib_umem_release(hr_cq->umem);
if (hr_cq->db_en == 1) if (hr_cq->db_en == 1)
hns_roce_db_unmap_user( hns_roce_db_unmap_user(
to_hr_ucontext(ib_cq->uobject->context), rdma_udata_to_drv_context(
udata,
struct hns_roce_ucontext,
ibucontext),
&hr_cq->db); &hr_cq->db);
} else { } else {
/* Free the buff of stored cq */ /* Free the buff of stored cq */
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <net/addrconf.h> #include <net/addrconf.h>
#include <rdma/ib_addr.h> #include <rdma/ib_addr.h>
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hnae3.h" #include "hnae3.h"
#include "hns_roce_common.h" #include "hns_roce_common.h"
...@@ -4442,7 +4443,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4442,7 +4443,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, struct hns_roce_qp *hr_qp,
bool is_user) struct ib_udata *udata)
{ {
struct hns_roce_cq *send_cq, *recv_cq; struct hns_roce_cq *send_cq, *recv_cq;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
...@@ -4464,7 +4465,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ...@@ -4464,7 +4465,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_lock_cqs(send_cq, recv_cq); hns_roce_lock_cqs(send_cq, recv_cq);
if (!is_user) { if (!udata) {
__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ? __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
to_hr_srq(hr_qp->ibqp.srq) : NULL); to_hr_srq(hr_qp->ibqp.srq) : NULL);
if (send_cq != recv_cq) if (send_cq != recv_cq)
...@@ -4485,16 +4486,18 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ...@@ -4485,16 +4486,18 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
if (is_user) { if (udata) {
struct hns_roce_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct hns_roce_ucontext,
ibucontext);
if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
hns_roce_db_unmap_user( hns_roce_db_unmap_user(context, &hr_qp->sdb);
to_hr_ucontext(hr_qp->ibqp.uobject->context),
&hr_qp->sdb);
if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
hns_roce_db_unmap_user( hns_roce_db_unmap_user(context, &hr_qp->rdb);
to_hr_ucontext(hr_qp->ibqp.uobject->context),
&hr_qp->rdb);
ib_umem_release(hr_qp->umem); ib_umem_release(hr_qp->umem);
} else { } else {
kfree(hr_qp->sq.wrid); kfree(hr_qp->sq.wrid);
...@@ -4519,7 +4522,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ...@@ -4519,7 +4522,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret; int ret;
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, ibqp->uobject); ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret); dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
return ret; return ret;
......
...@@ -2060,9 +2060,12 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) ...@@ -2060,9 +2060,12 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
if (iwmr->type != IW_MEMREG_TYPE_MEM) { if (iwmr->type != IW_MEMREG_TYPE_MEM) {
/* region is released. only test for userness. */ /* region is released. only test for userness. */
if (iwmr->region) { if (iwmr->region) {
struct i40iw_ucontext *ucontext; struct i40iw_ucontext *ucontext =
rdma_udata_to_drv_context(
udata,
struct i40iw_ucontext,
ibucontext);
ucontext = to_ucontext(ibpd->uobject->context);
i40iw_del_memlist(iwmr, ucontext); i40iw_del_memlist(iwmr, ucontext);
} }
if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP) if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include "mlx4_ib.h" #include "mlx4_ib.h"
#include <rdma/mlx4-abi.h> #include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>
static void mlx4_ib_cq_comp(struct mlx4_cq *cq) static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{ {
...@@ -493,8 +494,13 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) ...@@ -493,8 +494,13 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
mlx4_cq_free(dev->dev, &mcq->mcq); mlx4_cq_free(dev->dev, &mcq->mcq);
mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt); mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
if (cq->uobject) { if (udata) {
mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db); mlx4_ib_db_unmap_user(
rdma_udata_to_drv_context(
udata,
struct mlx4_ib_ucontext,
ibucontext),
&mcq->db);
ib_umem_release(mcq->umem); ib_umem_release(mcq->umem);
} else { } else {
mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe); mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
......
...@@ -1338,7 +1338,8 @@ static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) ...@@ -1338,7 +1338,8 @@ static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
} }
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
enum mlx4_ib_source_type src, bool is_user) enum mlx4_ib_source_type src,
struct ib_udata *udata)
{ {
struct mlx4_ib_cq *send_cq, *recv_cq; struct mlx4_ib_cq *send_cq, *recv_cq;
unsigned long flags; unsigned long flags;
...@@ -1380,7 +1381,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, ...@@ -1380,7 +1381,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
list_del(&qp->qps_list); list_del(&qp->qps_list);
list_del(&qp->cq_send_list); list_del(&qp->cq_send_list);
list_del(&qp->cq_recv_list); list_del(&qp->cq_recv_list);
if (!is_user) { if (!udata) {
__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
if (send_cq != recv_cq) if (send_cq != recv_cq)
...@@ -1398,19 +1399,26 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, ...@@ -1398,19 +1399,26 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (qp->flags & MLX4_IB_QP_NETIF) if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
else if (src == MLX4_IB_RWQ_SRC) else if (src == MLX4_IB_RWQ_SRC)
mlx4_ib_release_wqn(to_mucontext( mlx4_ib_release_wqn(
qp->ibwq.uobject->context), qp, 1); rdma_udata_to_drv_context(
udata,
struct mlx4_ib_ucontext,
ibucontext),
qp, 1);
else else
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
} }
mlx4_mtt_cleanup(dev->dev, &qp->mtt); mlx4_mtt_cleanup(dev->dev, &qp->mtt);
if (is_user) { if (udata) {
if (qp->rq.wqe_cnt) { if (qp->rq.wqe_cnt) {
struct mlx4_ib_ucontext *mcontext = !src ? struct mlx4_ib_ucontext *mcontext =
to_mucontext(qp->ibqp.uobject->context) : rdma_udata_to_drv_context(
to_mucontext(qp->ibwq.uobject->context); udata,
struct mlx4_ib_ucontext,
ibucontext);
mlx4_ib_db_unmap_user(mcontext, &qp->db); mlx4_ib_db_unmap_user(mcontext, &qp->db);
} }
ib_umem_release(qp->umem); ib_umem_release(qp->umem);
...@@ -1594,7 +1602,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, ...@@ -1594,7 +1602,7 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
return ibqp; return ibqp;
} }
static int _mlx4_ib_destroy_qp(struct ib_qp *qp) static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{ {
struct mlx4_ib_dev *dev = to_mdev(qp->device); struct mlx4_ib_dev *dev = to_mdev(qp->device);
struct mlx4_ib_qp *mqp = to_mqp(qp); struct mlx4_ib_qp *mqp = to_mqp(qp);
...@@ -1615,7 +1623,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp) ...@@ -1615,7 +1623,7 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
if (qp->rwq_ind_tbl) { if (qp->rwq_ind_tbl) {
destroy_qp_rss(dev, mqp); destroy_qp_rss(dev, mqp);
} else { } else {
destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, qp->uobject); destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
} }
if (is_sqp(dev, mqp)) if (is_sqp(dev, mqp))
...@@ -1637,7 +1645,7 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) ...@@ -1637,7 +1645,7 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
ib_destroy_qp(sqp->roce_v2_gsi); ib_destroy_qp(sqp->roce_v2_gsi);
} }
return _mlx4_ib_destroy_qp(qp); return _mlx4_ib_destroy_qp(qp, udata);
} }
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type) static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
...@@ -4252,7 +4260,7 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata) ...@@ -4252,7 +4260,7 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
if (qp->counter_index) if (qp->counter_index)
mlx4_ib_free_qp_counter(dev, qp); mlx4_ib_free_qp_counter(dev, qp);
destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1); destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
kfree(qp); kfree(qp);
......
...@@ -280,8 +280,13 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) ...@@ -280,8 +280,13 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
mlx4_srq_free(dev->dev, &msrq->msrq); mlx4_srq_free(dev->dev, &msrq->msrq);
mlx4_mtt_cleanup(dev->dev, &msrq->mtt); mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
if (srq->uobject) { if (udata) {
mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); mlx4_ib_db_unmap_user(
rdma_udata_to_drv_context(
udata,
struct mlx4_ib_ucontext,
ibucontext),
&msrq->db);
ib_umem_release(msrq->umem); ib_umem_release(msrq->umem);
} else { } else {
kvfree(msrq->wrid); kvfree(msrq->wrid);
......
...@@ -796,9 +796,12 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, ...@@ -796,9 +796,12 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
return err; return err;
} }
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context) static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{ {
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
mlx5_ib_db_unmap_user(context, &cq->db);
ib_umem_release(cq->buf.umem); ib_umem_release(cq->buf.umem);
} }
...@@ -923,7 +926,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, ...@@ -923,7 +926,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->list_send_qp); INIT_LIST_HEAD(&cq->list_send_qp);
INIT_LIST_HEAD(&cq->list_recv_qp); INIT_LIST_HEAD(&cq->list_recv_qp);
if (context) { if (udata) {
err = create_cq_user(dev, udata, context, cq, entries, err = create_cq_user(dev, udata, context, cq, entries,
&cqb, &cqe_size, &index, &inlen); &cqb, &cqe_size, &index, &inlen);
if (err) if (err)
...@@ -985,8 +988,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, ...@@ -985,8 +988,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
err_cqb: err_cqb:
kvfree(cqb); kvfree(cqb);
if (context) if (udata)
destroy_cq_user(cq, context); destroy_cq_user(cq, udata);
else else
destroy_cq_kernel(dev, cq); destroy_cq_kernel(dev, cq);
...@@ -1000,14 +1003,10 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) ...@@ -1000,14 +1003,10 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{ {
struct mlx5_ib_dev *dev = to_mdev(cq->device); struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq); struct mlx5_ib_cq *mcq = to_mcq(cq);
struct ib_ucontext *context = NULL;
if (cq->uobject)
context = cq->uobject->context;
mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
if (context) if (udata)
destroy_cq_user(mcq, context); destroy_cq_user(mcq, udata);
else else
destroy_cq_kernel(dev, mcq); destroy_cq_kernel(dev, mcq);
......
...@@ -2329,7 +2329,10 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs) ...@@ -2329,7 +2329,10 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) - page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >> MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
PAGE_SHIFT; PAGE_SHIFT;
bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages, bitmap_clear(rdma_udata_to_drv_context(
&attrs->driver_udata,
struct mlx5_ib_ucontext,
ibucontext)->dm_pages,
page_idx, page_idx,
DIV_ROUND_UP(act_size, PAGE_SIZE)); DIV_ROUND_UP(act_size, PAGE_SIZE));
......
...@@ -777,14 +777,17 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata, ...@@ -777,14 +777,17 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
} }
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_rwq *rwq) struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{ {
struct mlx5_ib_ucontext *context; struct mlx5_ib_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct mlx5_ib_ucontext,
ibucontext);
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP) if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
atomic_dec(&dev->delay_drop.rqs_cnt); atomic_dec(&dev->delay_drop.rqs_cnt);
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &rwq->db); mlx5_ib_db_unmap_user(context, &rwq->db);
if (rwq->umem) if (rwq->umem)
ib_umem_release(rwq->umem); ib_umem_release(rwq->umem);
...@@ -983,11 +986,15 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -983,11 +986,15 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
} }
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd, static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base) struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
struct ib_udata *udata)
{ {
struct mlx5_ib_ucontext *context; struct mlx5_ib_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct mlx5_ib_ucontext,
ibucontext);
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &qp->db); mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem) if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem); ib_umem_release(base->ubuffer.umem);
...@@ -2284,7 +2291,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -2284,7 +2291,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err_create: err_create:
if (qp->create_type == MLX5_QP_USER) if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(dev, pd, qp, base); destroy_qp_user(dev, pd, qp, base, udata);
else if (qp->create_type == MLX5_QP_KERNEL) else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp); destroy_qp_kernel(dev, qp);
...@@ -2395,7 +2402,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, ...@@ -2395,7 +2402,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct mlx5_modify_raw_qp_param *raw_qp_param, const struct mlx5_modify_raw_qp_param *raw_qp_param,
u8 lag_tx_affinity); u8 lag_tx_affinity);
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_udata *udata)
{ {
struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base; struct mlx5_ib_qp_base *base;
...@@ -2466,7 +2474,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) ...@@ -2466,7 +2474,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->create_type == MLX5_QP_KERNEL) if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp); destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER) else if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base); destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
} }
static const char *ib_qp_type_str(enum ib_qp_type type) static const char *ib_qp_type_str(enum ib_qp_type type)
...@@ -2743,7 +2751,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) ...@@ -2743,7 +2751,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
if (mqp->qp_sub_type == MLX5_IB_QPT_DCT) if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp); return mlx5_ib_destroy_dct(mqp);
destroy_qp_common(dev, mqp); destroy_qp_common(dev, mqp, udata);
kfree(mqp); kfree(mqp);
...@@ -5959,7 +5967,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, ...@@ -5959,7 +5967,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
err_copy: err_copy:
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq: err_user_rq:
destroy_user_rq(dev, pd, rwq); destroy_user_rq(dev, pd, rwq, udata);
err: err:
kfree(rwq); kfree(rwq);
return ERR_PTR(err); return ERR_PTR(err);
...@@ -5971,7 +5979,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata) ...@@ -5971,7 +5979,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
struct mlx5_ib_rwq *rwq = to_mrwq(wq); struct mlx5_ib_rwq *rwq = to_mrwq(wq);
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
destroy_user_rq(dev, wq->pd, rwq); destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq); kfree(rwq);
return 0; return 0;
......
...@@ -194,9 +194,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, ...@@ -194,9 +194,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return err; return err;
} }
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
struct ib_udata *udata)
{ {
mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); mlx5_ib_db_unmap_user(
rdma_udata_to_drv_context(
udata,
struct mlx5_ib_ucontext,
ibucontext),
&srq->db);
ib_umem_release(srq->umem); ib_umem_release(srq->umem);
} }
...@@ -327,7 +333,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, ...@@ -327,7 +333,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
err_usr_kern_srq: err_usr_kern_srq:
if (udata) if (udata)
destroy_srq_user(pd, srq); destroy_srq_user(pd, srq, udata);
else else
destroy_srq_kernel(dev, srq); destroy_srq_kernel(dev, srq);
...@@ -395,7 +401,12 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) ...@@ -395,7 +401,12 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
mlx5_cmd_destroy_srq(dev, &msrq->msrq); mlx5_cmd_destroy_srq(dev, &msrq->msrq);
if (srq->uobject) { if (srq->uobject) {
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); mlx5_ib_db_unmap_user(
rdma_udata_to_drv_context(
udata,
struct mlx5_ib_ucontext,
ibucontext),
&msrq->db);
ib_umem_release(msrq->umem); ib_umem_release(msrq->umem);
} else { } else {
destroy_srq_kernel(dev, msrq); destroy_srq_kernel(dev, msrq);
......
...@@ -479,10 +479,12 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd, ...@@ -479,10 +479,12 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{ {
struct mthca_ucontext *context; if (udata) {
struct mthca_ucontext *context =
if (srq->uobject) { rdma_udata_to_drv_context(
context = to_mucontext(srq->uobject->context); udata,
struct mthca_ucontext,
ibucontext);
mthca_unmap_user_db(to_mdev(srq->device), &context->uar, mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
context->db_tab, to_msrq(srq)->db_index); context->db_tab, to_msrq(srq)->db_index);
...@@ -609,14 +611,20 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd, ...@@ -609,14 +611,20 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{ {
if (qp->uobject) { if (udata) {
struct mthca_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct mthca_ucontext,
ibucontext);
mthca_unmap_user_db(to_mdev(qp->device), mthca_unmap_user_db(to_mdev(qp->device),
&to_mucontext(qp->uobject->context)->uar, &context->uar,
to_mucontext(qp->uobject->context)->db_tab, context->db_tab,
to_mqp(qp)->sq.db_index); to_mqp(qp)->sq.db_index);
mthca_unmap_user_db(to_mdev(qp->device), mthca_unmap_user_db(to_mdev(qp->device),
&to_mucontext(qp->uobject->context)->uar, &context->uar,
to_mucontext(qp->uobject->context)->db_tab, context->db_tab,
to_mqp(qp)->rq.db_index); to_mqp(qp)->rq.db_index);
} }
mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
...@@ -829,14 +837,20 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda ...@@ -829,14 +837,20 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{ {
if (cq->uobject) { if (udata) {
struct mthca_ucontext *context =
rdma_udata_to_drv_context(
udata,
struct mthca_ucontext,
ibucontext);
mthca_unmap_user_db(to_mdev(cq->device), mthca_unmap_user_db(to_mdev(cq->device),
&to_mucontext(cq->uobject->context)->uar, &context->uar,
to_mucontext(cq->uobject->context)->db_tab, context->db_tab,
to_mcq(cq)->arm_db_index); to_mcq(cq)->arm_db_index);
mthca_unmap_user_db(to_mdev(cq->device), mthca_unmap_user_db(to_mdev(cq->device),
&to_mucontext(cq->uobject->context)->uar, &context->uar,
to_mucontext(cq->uobject->context)->db_tab, context->db_tab,
to_mcq(cq)->set_ci_db_index); to_mcq(cq)->set_ci_db_index);
} }
mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
......
...@@ -707,8 +707,12 @@ static void nes_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) ...@@ -707,8 +707,12 @@ static void nes_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct nes_device *nesdev = nesvnic->nesdev; struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter; struct nes_adapter *nesadapter = nesdev->nesadapter;
if ((ibpd->uobject) && (ibpd->uobject->context)) { if (udata) {
nesucontext = to_nesucontext(ibpd->uobject->context); nesucontext =
rdma_udata_to_drv_context(
udata,
struct nes_ucontext,
ibucontext);
nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n", nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
nespd->mmap_db_index); nespd->mmap_db_index);
clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells); clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
...@@ -1337,8 +1341,12 @@ static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ...@@ -1337,8 +1341,12 @@ static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
} }
if (nesqp->user_mode) { if (nesqp->user_mode) {
if ((ibqp->uobject)&&(ibqp->uobject->context)) { if (udata) {
nes_ucontext = to_nesucontext(ibqp->uobject->context); nes_ucontext =
rdma_udata_to_drv_context(
udata,
struct nes_ucontext,
ibucontext);
clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs); clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL; nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL;
if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) { if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
......
...@@ -986,7 +986,7 @@ int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) ...@@ -986,7 +986,7 @@ int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
dev->ops->common->chain_free(dev->cdev, &cq->pbl); dev->ops->common->chain_free(dev->cdev, &cq->pbl);
if (ibcq->uobject && ibcq->uobject->context) { if (udata) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem); ib_umem_release(cq->q.umem);
} }
...@@ -2470,7 +2470,8 @@ int qedr_query_qp(struct ib_qp *ibqp, ...@@ -2470,7 +2470,8 @@ int qedr_query_qp(struct ib_qp *ibqp,
return rc; return rc;
} }
static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp) static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
struct ib_udata *udata)
{ {
int rc = 0; int rc = 0;
...@@ -2480,7 +2481,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp) ...@@ -2480,7 +2481,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
return rc; return rc;
} }
if (qp->ibqp.uobject && qp->ibqp.uobject->context) if (udata)
qedr_cleanup_user(dev, qp); qedr_cleanup_user(dev, qp);
else else
qedr_cleanup_kernel(dev, qp); qedr_cleanup_kernel(dev, qp);
...@@ -2532,7 +2533,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ...@@ -2532,7 +2533,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (qp->qp_type == IB_QPT_GSI) if (qp->qp_type == IB_QPT_GSI)
qedr_destroy_gsi_qp(dev); qedr_destroy_gsi_qp(dev);
qedr_free_qp_resources(dev, qp); qedr_free_qp_resources(dev, qp, udata);
if (atomic_dec_and_test(&qp->refcnt) && if (atomic_dec_and_test(&qp->refcnt) &&
rdma_protocol_iwarp(&dev->ibdev, 1)) { rdma_protocol_iwarp(&dev->ibdev, 1)) {
......
...@@ -648,7 +648,7 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) ...@@ -648,7 +648,7 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);
usnic_uiom_reg_release(mr->umem, ibmr->uobject->context); usnic_uiom_reg_release(mr->umem);
kfree(mr); kfree(mr);
return 0; return 0;
} }
......
...@@ -432,8 +432,7 @@ static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr) ...@@ -432,8 +432,7 @@ static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
} }
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
struct ib_ucontext *context)
{ {
__usnic_uiom_reg_release(uiomr->pd, uiomr, 1); __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
......
...@@ -90,7 +90,6 @@ void usnic_uiom_free_dev_list(struct device **devs); ...@@ -90,7 +90,6 @@ void usnic_uiom_free_dev_list(struct device **devs);
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
unsigned long addr, size_t size, unsigned long addr, size_t size,
int access, int dmasync); int access, int dmasync);
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr);
struct ib_ucontext *ucontext);
int usnic_uiom_init(char *drv_name); int usnic_uiom_init(char *drv_name);
#endif /* USNIC_UIOM_H_ */ #endif /* USNIC_UIOM_H_ */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册