Commit 27114876 authored by Matthew Wilcox, committed by Jason Gunthorpe

cxgb3: Convert qpidr to XArray

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Parent a2f40971
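This commit applies the standard idr-to-XArray conversion pattern: idr_init() becomes xa_init_flags(), idr_find() becomes xa_load(), idr_for_each() callbacks become inline xa_for_each() loops, and the driver-private spinlock protecting the table is replaced by the XArray's internal lock. A minimal sketch of the embedding pattern (the my_dev/my_obj names are hypothetical, not the driver's types):

#include <linux/xarray.h>
#include <linux/types.h>

struct my_obj {
	u32 id;
};

struct my_dev {
	struct xarray objs;	/* replaces a struct idr plus its spinlock_t */
};

static void my_dev_init(struct my_dev *dev)
{
	/* XA_FLAGS_LOCK_IRQ makes the XArray take its internal lock irq-safely */
	xa_init_flags(&dev->objs, XA_FLAGS_LOCK_IRQ);
}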
@@ -62,37 +62,30 @@ struct cxgb3_client t3c_client = {
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(dev_mutex);
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-	struct iwch_qp *qhp = p;
-
-	cxio_disable_wq_db(&qhp->wq);
-	return 0;
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
-	struct iwch_qp *qhp = p;
-
-	if (data)
-		ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
-	cxio_enable_wq_db(&qhp->wq);
-	return 0;
-}
-
 static void disable_dbs(struct iwch_dev *rnicp)
 {
-	spin_lock_irq(&rnicp->lock);
-	idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
-	spin_unlock_irq(&rnicp->lock);
+	unsigned long index;
+	struct iwch_qp *qhp;
+
+	xa_lock_irq(&rnicp->qps);
+	xa_for_each(&rnicp->qps, index, qhp)
+		cxio_disable_wq_db(&qhp->wq);
+	xa_unlock_irq(&rnicp->qps);
 }
 
 static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
 {
-	spin_lock_irq(&rnicp->lock);
-	idr_for_each(&rnicp->qpidr, enable_qp_db,
-		     (void *)(unsigned long)ring_db);
-	spin_unlock_irq(&rnicp->lock);
+	unsigned long index;
+	struct iwch_qp *qhp;
+
+	xa_lock_irq(&rnicp->qps);
+	xa_for_each(&rnicp->qps, index, qhp) {
+		if (ring_db)
+			ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
+				      qhp->wq.qpid);
+		cxio_enable_wq_db(&qhp->wq);
+	}
+	xa_unlock_irq(&rnicp->qps);
 }
 
 static void iwch_db_drop_task(struct work_struct *work)
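With an XArray there is no callback indirection, which is why the disable_qp_db()/enable_qp_db() helpers above disappear: the loop body moves inline under the XArray's own lock. A sketch of the locked-iteration pattern, continuing the hypothetical my_dev type from above:

static void my_dev_log_all(struct my_dev *dev)
{
	struct my_obj *obj;
	unsigned long index;

	xa_lock_irq(&dev->objs);	/* pairs with XA_FLAGS_LOCK_IRQ */
	xa_for_each(&dev->objs, index, obj)
		pr_debug("obj %u at index %lu\n", obj->id, index);
	xa_unlock_irq(&dev->objs);
}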
@@ -106,7 +99,7 @@ static void rnic_init(struct iwch_dev *rnicp)
 {
 	pr_debug("%s iwch_dev %p\n", __func__, rnicp);
 	xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
-	idr_init(&rnicp->qpidr);
+	xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
 	idr_init(&rnicp->mmidr);
 	spin_lock_init(&rnicp->lock);
 	INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
@@ -191,7 +184,7 @@ static void close_rnic_dev(struct t3cdev *tdev)
 			iwch_unregister_device(dev);
 			cxio_rdev_close(&dev->rdev);
 			WARN_ON(!xa_empty(&dev->cqs));
-			idr_destroy(&dev->qpidr);
+			WARN_ON(!xa_empty(&dev->qps));
 			idr_destroy(&dev->mmidr);
 			ib_dealloc_device(&dev->ibdev);
 			break;
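idr_destroy() has no direct replacement in the hunk above because an XArray whose entries have all been erased holds no memory; teardown only needs to assert emptiness. A sketch of the equivalent check:

/* Teardown sketch: all entries should already have been erased. */
WARN_ON(!xa_empty(&dev->objs));
/* Only if entries might remain would xa_destroy(&dev->objs) be needed. */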
@@ -107,7 +107,7 @@ struct iwch_dev {
 	u32 device_cap_flags;
 	struct iwch_rnic_attributes attr;
 	struct xarray cqs;
-	struct idr qpidr;
+	struct xarray qps;
 	struct idr mmidr;
 	spinlock_t lock;
 	struct list_head entry;
@@ -141,7 +141,7 @@ static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
 
 static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
 {
-	return idr_find(&rhp->qpidr, qpid);
+	return xa_load(&rhp->qps, qpid);
 }
 
 static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
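get_qhp() reduces to a bare xa_load(), which is safe to call without the XArray lock because lookups are RCU-protected internally; callers that must pair the lookup with another operation, as in the event path below, take the lock explicitly. A sketch with the hypothetical types:

static struct my_obj *my_dev_find(struct my_dev *dev, u32 id)
{
	/* RCU-protected read; no external locking needed for a bare lookup */
	return xa_load(&dev->objs, id);
}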
@@ -48,14 +48,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct iwch_qp *qhp;
 	unsigned long flag;
 
-	spin_lock(&rnicp->lock);
-	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
+	xa_lock(&rnicp->qps);
+	qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
 
 	if (!qhp) {
 		pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
 		       __func__, CQE_STATUS(rsp_msg->cqe),
 		       CQE_QPID(rsp_msg->cqe));
-		spin_unlock(&rnicp->lock);
+		xa_unlock(&rnicp->qps);
 		return;
 	}
 
@@ -65,7 +65,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 			 __func__,
 			 qhp->attr.state, qhp->wq.qpid,
 			 CQE_STATUS(rsp_msg->cqe));
-		spin_unlock(&rnicp->lock);
+		xa_unlock(&rnicp->qps);
 		return;
 	}
 
@@ -76,7 +76,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 		CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
 
 	atomic_inc(&qhp->refcnt);
-	spin_unlock(&rnicp->lock);
+	xa_unlock(&rnicp->qps);
 
 	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
 		attrs.next_state = IWCH_QP_STATE_TERMINATE;
@@ -114,21 +114,21 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	unsigned long flag;
 
 	rnicp = (struct iwch_dev *) rdev_p->ulp;
-	spin_lock(&rnicp->lock);
+	xa_lock(&rnicp->qps);
 	chp = get_chp(rnicp, cqid);
-	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
+	qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
 	if (!chp || !qhp) {
 		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 		       cqid, CQE_QPID(rsp_msg->cqe),
 		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
 		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
 		       CQE_WRID_LOW(rsp_msg->cqe));
-		spin_unlock(&rnicp->lock);
+		xa_unlock(&rnicp->qps);
 		goto out;
 	}
 
 	iwch_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock(&rnicp->lock);
+	xa_unlock(&rnicp->qps);
 
 	/*
 	 * 1) completion of our sending a TERMINATE.
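The event-dispatch hunks above show the case where the lock still matters: the lookup and the reference grab must be atomic with respect to a concurrent erase, so xa_lock() is held across xa_load() and the refcount increment. A sketch of that lookup-then-ref pattern, assuming the hypothetical my_obj has gained a refcount_t ref field:

static struct my_obj *my_dev_find_get(struct my_dev *dev, u32 id)
{
	struct my_obj *obj;

	xa_lock(&dev->objs);
	obj = xa_load(&dev->objs, id);
	if (obj)
		refcount_inc(&obj->ref);	/* cannot race with xa_erase() */
	xa_unlock(&dev->objs);
	return obj;
}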
@@ -756,7 +756,7 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
 		iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
+	xa_erase_irq(&rhp->qps, qhp->wq.qpid);
 
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
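remove_handle() becomes xa_erase_irq(), which removes the entry at the index and returns the pointer that was stored there (or NULL if the index was empty). The driver ignores the return value; a caller that wants to catch a bogus removal could check it, as in this sketch:

static void my_dev_remove(struct my_dev *dev, u32 id)
{
	/* xa_erase_irq() returns the erased entry, or NULL if none existed */
	WARN_ON(xa_erase_irq(&dev->objs, id) == NULL);
}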
@@ -872,7 +872,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);
 
-	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+	if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
 		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 		kfree(qhp);
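xa_store_irq() returns the previous entry at that index on success and an xa_err()-encoded pointer on allocation failure, so the bare if (xa_store_irq(...)) check above treats both an error and an unexpectedly occupied slot as failure. A sketch that separates the two cases, with the hypothetical types again:

static int my_dev_insert(struct my_dev *dev, struct my_obj *obj)
{
	void *old = xa_store_irq(&dev->objs, obj->id, obj, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* typically -ENOMEM */
	return old ? -EEXIST : 0;	/* the slot should have been empty */
}

xa_insert_irq() expresses the store-only-if-empty intent directly, returning -EBUSY for an occupied index, and would be an alternative here.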