Commit 2f431291 authored by Matthew Wilcox, committed by Jason Gunthorpe

cxgb4: Convert qpidr to XArray
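
The qpidr IDR and the device spinlock that guarded it become an XArray
with its own internal, IRQ-safe lock: lookups go through xa_load(),
insert_handle()/remove_handle() become xa_insert_irq()/xa_erase_irq(),
and the idr_for_each() callbacks (count_qps, add_and_ref_qp,
disable_qp_db, enable_qp_db) turn into open-coded xa_for_each() loops.
Condensed from the qp_open() hunks below, the recurring before/after
pattern is:

	/* before: callback walk under the driver-wide spinlock */
	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	/* after: direct iteration under the XArray's own lock */
	xa_lock_irq(&qpd->devp->qps);
	xa_for_each(&qpd->devp->qps, index, qp)
		dump_qp(qp, qpd);
	xa_unlock_irq(&qpd->devp->qps);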

Signed-off-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Parent 52e124c2
drivers/infiniband/hw/cxgb4/device.c
@@ -250,16 +250,11 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
 	}
 }
 
-static int dump_qp(int id, void *p, void *data)
+static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
 {
-	struct c4iw_qp *qp = p;
-	struct c4iw_debugfs_data *qpd = data;
 	int space;
 	int cc;
 
-	if (id != qp->wq.sq.qid)
-		return 0;
-
 	space = qpd->bufsize - qpd->pos - 1;
 	if (space == 0)
 		return 1;
@@ -335,7 +330,9 @@ static int qp_release(struct inode *inode, struct file *file)
 static int qp_open(struct inode *inode, struct file *file)
 {
+	struct c4iw_qp *qp;
 	struct c4iw_debugfs_data *qpd;
+	unsigned long index;
 	int count = 1;
 
 	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
@@ -345,9 +342,12 @@ static int qp_open(struct inode *inode, struct file *file)
 	qpd->devp = inode->i_private;
 	qpd->pos = 0;
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
-	spin_unlock_irq(&qpd->devp->lock);
+	/*
+	 * No need to lock; we drop the lock to call vmalloc so it's racy
+	 * anyway.  Someone who cares should switch this over to seq_file
+	 */
+	xa_for_each(&qpd->devp->qps, index, qp)
+		count++;
 
 	qpd->bufsize = count * 180;
 	qpd->buf = vmalloc(qpd->bufsize);
@@ -356,9 +356,10 @@ static int qp_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 	}
 
-	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
-	spin_unlock_irq(&qpd->devp->lock);
+	xa_lock_irq(&qpd->devp->qps);
+	xa_for_each(&qpd->devp->qps, index, qp)
+		dump_qp(qp, qpd);
+	xa_unlock_irq(&qpd->devp->qps);
 
 	qpd->buf[qpd->pos++] = 0;
 	file->private_data = qpd;
@@ -932,8 +933,7 @@ void c4iw_dealloc(struct uld_ctx *ctx)
 {
 	c4iw_rdev_close(&ctx->dev->rdev);
 	WARN_ON(!xa_empty(&ctx->dev->cqs));
-	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
-	idr_destroy(&ctx->dev->qpidr);
+	WARN_ON(!xa_empty(&ctx->dev->qps));
 	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
 	idr_destroy(&ctx->dev->mmidr);
 	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
@@ -1044,7 +1044,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	}
 	xa_init_flags(&devp->cqs, XA_FLAGS_LOCK_IRQ);
-	idr_init(&devp->qpidr);
+	xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ);
 	idr_init(&devp->mmidr);
 	idr_init(&devp->hwtid_idr);
 	idr_init(&devp->stid_idr);
@@ -1264,34 +1264,21 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	return 0;
 }
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_disable_wq_db(&qp->wq);
-	return 0;
-}
-
 static void stop_queues(struct uld_ctx *ctx)
 {
-	unsigned long flags;
+	struct c4iw_qp *qp;
+	unsigned long index, flags;
 
-	spin_lock_irqsave(&ctx->dev->lock, flags);
+	xa_lock_irqsave(&ctx->dev->qps, flags);
 	ctx->dev->rdev.stats.db_state_transitions++;
 	ctx->dev->db_state = STOPPED;
-	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
-		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-	else
+	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+		xa_for_each(&ctx->dev->qps, index, qp)
+			t4_disable_wq_db(&qp->wq);
+	} else {
 		ctx->dev->rdev.status_page->db_off = 1;
-	spin_unlock_irqrestore(&ctx->dev->lock, flags);
-}
-
-static int enable_qp_db(int id, void *p, void *data)
-{
-	struct c4iw_qp *qp = p;
-
-	t4_enable_wq_db(&qp->wq);
-	return 0;
+	}
+	xa_unlock_irqrestore(&ctx->dev->qps, flags);
 }
 
 static void resume_rc_qp(struct c4iw_qp *qp)
@@ -1321,18 +1308,21 @@ static void resume_a_chunk(struct uld_ctx *ctx)
 static void resume_queues(struct uld_ctx *ctx)
 {
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	if (ctx->dev->db_state != STOPPED)
 		goto out;
 	ctx->dev->db_state = FLOW_CONTROL;
 	while (1) {
 		if (list_empty(&ctx->dev->db_fc_list)) {
+			struct c4iw_qp *qp;
+			unsigned long index;
+
 			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
 			ctx->dev->db_state = NORMAL;
 			ctx->dev->rdev.stats.db_state_transitions++;
 			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
-				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
-					     NULL);
+				xa_for_each(&ctx->dev->qps, index, qp)
+					t4_enable_wq_db(&qp->wq);
 			} else {
 				ctx->dev->rdev.status_page->db_off = 0;
 			}
@@ -1344,12 +1334,12 @@ static void resume_queues(struct uld_ctx *ctx)
 			resume_a_chunk(ctx);
 		}
 		if (!list_empty(&ctx->dev->db_fc_list)) {
-			spin_unlock_irq(&ctx->dev->lock);
+			xa_unlock_irq(&ctx->dev->qps);
 			if (DB_FC_RESUME_DELAY) {
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				schedule_timeout(DB_FC_RESUME_DELAY);
 			}
-			spin_lock_irq(&ctx->dev->lock);
+			xa_lock_irq(&ctx->dev->qps);
 			if (ctx->dev->db_state != FLOW_CONTROL)
 				break;
 		}
@@ -1358,7 +1348,7 @@ static void resume_queues(struct uld_ctx *ctx)
 out:
 	if (ctx->dev->db_state != NORMAL)
 		ctx->dev->rdev.stats.db_fc_interruptions++;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 struct qp_list {
@@ -1366,23 +1356,6 @@ struct qp_list {
 	struct c4iw_qp **qps;
 };
 
-static int add_and_ref_qp(int id, void *p, void *data)
-{
-	struct qp_list *qp_listp = data;
-	struct c4iw_qp *qp = p;
-
-	c4iw_qp_add_ref(&qp->ibqp);
-	qp_listp->qps[qp_listp->idx++] = qp;
-	return 0;
-}
-
-static int count_qps(int id, void *p, void *data)
-{
-	unsigned *countp = data;
-
-	(*countp)++;
-	return 0;
-}
-
 static void deref_qps(struct qp_list *qp_list)
 {
 	int idx;
@@ -1399,7 +1372,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 	for (idx = 0; idx < qp_list->idx; idx++) {
 		struct c4iw_qp *qp = qp_list->qps[idx];
 
-		spin_lock_irq(&qp->rhp->lock);
+		xa_lock_irq(&qp->rhp->qps);
 		spin_lock(&qp->lock);
 		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
 					  qp->wq.sq.qid,
@@ -1409,7 +1382,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.sq.wq_pidx_inc = 0;
@@ -1423,12 +1396,12 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
 			spin_unlock(&qp->lock);
-			spin_unlock_irq(&qp->rhp->lock);
+			xa_unlock_irq(&qp->rhp->qps);
 			return;
 		}
 		qp->wq.rq.wq_pidx_inc = 0;
 		spin_unlock(&qp->lock);
-		spin_unlock_irq(&qp->rhp->lock);
+		xa_unlock_irq(&qp->rhp->qps);
 
 		/* Wait for the dbfifo to drain */
 		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1440,6 +1413,8 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 static void recover_queues(struct uld_ctx *ctx)
 {
+	struct c4iw_qp *qp;
+	unsigned long index;
 	int count = 0;
 	struct qp_list qp_list;
 	int ret;
@@ -1457,22 +1432,26 @@ static void recover_queues(struct uld_ctx *ctx)
 	}
 
 	/* Count active queues so we can build a list of queues to recover */
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != STOPPED);
 	ctx->dev->db_state = RECOVERY;
-	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
+	xa_for_each(&ctx->dev->qps, index, qp)
+		count++;
 
 	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
 	if (!qp_list.qps) {
-		spin_unlock_irq(&ctx->dev->lock);
+		xa_unlock_irq(&ctx->dev->qps);
 		return;
 	}
 	qp_list.idx = 0;
 
 	/* add and ref each qp so it doesn't get freed */
-	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
+	xa_for_each(&ctx->dev->qps, index, qp) {
+		c4iw_qp_add_ref(&qp->ibqp);
+		qp_list.qps[qp_list.idx++] = qp;
+	}
 
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 
 	/* now traverse the list in a safe context to recover the db state*/
 	recover_lost_dbs(ctx, &qp_list);
@@ -1481,10 +1460,10 @@ static void recover_queues(struct uld_ctx *ctx)
 	deref_qps(&qp_list);
 	kfree(qp_list.qps);
 
-	spin_lock_irq(&ctx->dev->lock);
+	xa_lock_irq(&ctx->dev->qps);
 	WARN_ON(ctx->dev->db_state != RECOVERY);
 	ctx->dev->db_state = STOPPED;
-	spin_unlock_irq(&ctx->dev->lock);
+	xa_unlock_irq(&ctx->dev->qps);
 }
 
 static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
drivers/infiniband/hw/cxgb4/ev.c
@@ -123,15 +123,15 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	struct c4iw_qp *qhp;
 	u32 cqid;
 
-	spin_lock_irq(&dev->lock);
-	qhp = get_qhp(dev, CQE_QPID(err_cqe));
+	xa_lock_irq(&dev->qps);
+	qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
 	if (!qhp) {
 		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 		       CQE_QPID(err_cqe),
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock_irq(&dev->lock);
+		xa_unlock_irq(&dev->qps);
 		goto out;
 	}
@@ -146,13 +146,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock_irq(&dev->lock);
+		xa_unlock_irq(&dev->qps);
 		goto out;
 	}
 
 	c4iw_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock_irq(&dev->lock);
+	xa_unlock_irq(&dev->qps);
 
 	/* Bad incoming write */
 	if (RQ_TYPE(err_cqe) &&
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -316,7 +316,7 @@ struct c4iw_dev {
 	struct c4iw_rdev rdev;
 	u32 device_cap_flags;
 	struct xarray cqs;
-	struct idr qpidr;
+	struct xarray qps;
 	struct idr mmidr;
 	spinlock_t lock;
 	struct mutex db_mutex;
@@ -354,7 +354,7 @@ static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
 static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
 {
-	return idr_find(&rhp->qpidr, qpid);
+	return xa_load(&rhp->qps, qpid);
 }
 
 static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
drivers/infiniband/hw/cxgb4/qp.c
@@ -63,12 +63,12 @@ static int alloc_ird(struct c4iw_dev *dev, u32 ird)
 {
 	int ret = 0;
 
-	spin_lock_irq(&dev->lock);
+	xa_lock_irq(&dev->qps);
 	if (ird <= dev->avail_ird)
 		dev->avail_ird -= ird;
 	else
 		ret = -ENOMEM;
-	spin_unlock_irq(&dev->lock);
+	xa_unlock_irq(&dev->qps);
 
 	if (ret)
 		dev_warn(&dev->rdev.lldi.pdev->dev,
@@ -79,9 +79,9 @@ static int alloc_ird(struct c4iw_dev *dev, u32 ird)
 static void free_ird(struct c4iw_dev *dev, int ird)
 {
-	spin_lock_irq(&dev->lock);
+	xa_lock_irq(&dev->qps);
 	dev->avail_ird += ird;
-	spin_unlock_irq(&dev->lock);
+	xa_unlock_irq(&dev->qps);
 }
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
@@ -939,7 +939,7 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&qhp->rhp->lock, flags);
+	xa_lock_irqsave(&qhp->rhp->qps, flags);
 	spin_lock(&qhp->lock);
 	if (qhp->rhp->db_state == NORMAL)
 		t4_ring_sq_db(&qhp->wq, inc, NULL);
@@ -948,7 +948,7 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
 		qhp->wq.sq.wq_pidx_inc += inc;
 	}
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
 	return 0;
 }
@@ -956,7 +956,7 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&qhp->rhp->lock, flags);
+	xa_lock_irqsave(&qhp->rhp->qps, flags);
 	spin_lock(&qhp->lock);
 	if (qhp->rhp->db_state == NORMAL)
 		t4_ring_rq_db(&qhp->wq, inc, NULL);
@@ -965,7 +965,7 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 		qhp->wq.rq.wq_pidx_inc += inc;
 	}
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
 	return 0;
 }
@@ -2111,12 +2111,11 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-
-	spin_lock_irq(&rhp->lock);
+	xa_lock_irq(&rhp->qps);
+	__xa_erase(&rhp->qps, qhp->wq.sq.qid);
 	if (!list_empty(&qhp->db_fc_entry))
 		list_del_init(&qhp->db_fc_entry);
-	spin_unlock_irq(&rhp->lock);
+	xa_unlock_irq(&rhp->qps);
 	free_ird(rhp, qhp->attr.max_ird);
 
 	c4iw_qp_rem_ref(ib_qp);
@@ -2234,7 +2233,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	kref_init(&qhp->kref);
 	INIT_WORK(&qhp->free_work, free_qp_work);
 
-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
 	if (ret)
 		goto err_destroy_qp;
@@ -2370,7 +2369,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 err_free_sq_key:
 	kfree(sq_key_mm);
 err_remove_handle:
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
 err_destroy_qp:
 	destroy_qp(&rhp->rdev, &qhp->wq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
@@ -2760,7 +2759,7 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
 	if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
 		srq->flags = T4_SRQ_LIMIT_SUPPORT;
 
-	ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
+	ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
 	if (ret)
 		goto err_free_queue;
@@ -2812,7 +2811,7 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
 err_free_srq_key_mm:
 	kfree(srq_key_mm);
 err_remove_handle:
-	remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+	xa_erase_irq(&rhp->qps, srq->wq.qid);
 err_free_queue:
 	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
 		       srq->wr_waitp);
@@ -2838,7 +2837,7 @@ int c4iw_destroy_srq(struct ib_srq *ibsrq)
 	pr_debug("%s id %d\n", __func__, srq->wq.qid);
 
-	remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
+	xa_erase_irq(&rhp->qps, srq->wq.qid);
 	ucontext = ibsrq->uobject ?
 		to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
 	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
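The conversion rests on a handful of XArray entry points. The sketch
below is illustrative rather than code from this patch: publish_qp(),
lookup_qp() and unpublish_qp() are invented names that condense the
insert, lookup and erase sites above, with error semantics per the
documented xa_insert() contract (-EBUSY for a duplicate index, -ENOMEM
on allocation failure).

	#include <linux/xarray.h>

	/* XA_FLAGS_LOCK_IRQ marks the embedded lock as IRQ-safe, matching
	 * xa_init_flags(&devp->qps, XA_FLAGS_LOCK_IRQ) in c4iw_alloc(). */
	static DEFINE_XARRAY_FLAGS(qps, XA_FLAGS_LOCK_IRQ);

	static int publish_qp(u32 qid, struct c4iw_qp *qhp)
	{
		/* Replaces insert_handle(): -EBUSY if qid is already
		 * present, -ENOMEM if node allocation fails. */
		return xa_insert_irq(&qps, qid, qhp, GFP_KERNEL);
	}

	static struct c4iw_qp *lookup_qp(u32 qid)
	{
		struct c4iw_qp *qhp;

		/* xa_load() alone is RCU-safe; the driver takes
		 * xa_lock_irq() where it must also grab a reference
		 * before dropping the lock, as in c4iw_ev_dispatch(). */
		xa_lock_irq(&qps);
		qhp = xa_load(&qps, qid);
		xa_unlock_irq(&qps);
		return qhp;
	}

	static void unpublish_qp(u32 qid)
	{
		/* Replaces remove_handle(); c4iw_destroy_qp() uses
		 * __xa_erase() instead, as it already holds xa_lock_irq(). */
		xa_erase_irq(&qps, qid);
	}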