提交 c12a67fe 编写于 作者: Steve Wise 提交者: Doug Ledford

iw_cxgb4: free EQ queue memory on last deref

Commit ad61a4c7 ("iw_cxgb4: don't block in destroy_qp awaiting
the last deref") introduced a bug where the RDMA QP EQ queue memory
(and QIDs) are possibly freed before the underlying connection has been
fully shutdown.  The result being a possible DMA read issued by HW after
the queue memory has been unmapped and freed.  This results in possible
WR corruption in the worst case, system bus errors if an IOMMU is in use,
and SGE "bad WR" errors reported in the very least.  The fix is to defer
unmap/free of queue memory and QID resources until the QP struct has
been fully dereferenced.  To do this, the c4iw_ucontext must also be kept
around until the last QP that references it is fully freed.  In addition,
since the last QP deref can happen in an IRQ disabled context, we need
a new workqueue thread to do the final unmap/free of the EQ queue memory.

Fixes: ad61a4c7 ("iw_cxgb4: don't block in destroy_qp awaiting the last deref")
Cc: stable@vger.kernel.org
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
上级 4fe7c296
...@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) ...@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
} }
} }
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
err = -ENOMEM;
goto err_free_status_page;
}
rdev->status_page->db_off = 0; rdev->status_page->db_off = 0;
return 0; return 0;
err_free_status_page:
free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool: destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev); c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool: destroy_rqtpool:
...@@ -862,6 +870,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) ...@@ -862,6 +870,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
static void c4iw_rdev_close(struct c4iw_rdev *rdev) static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{ {
destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log); kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page); free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev); c4iw_pblpool_destroy(rdev);
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/kref.h> #include <linux/kref.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
...@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext { ...@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
struct list_head qpids; struct list_head qpids;
struct list_head cqids; struct list_head cqids;
struct mutex lock; struct mutex lock;
struct kref kref;
}; };
enum c4iw_rdev_flags { enum c4iw_rdev_flags {
...@@ -183,6 +185,7 @@ struct c4iw_rdev { ...@@ -183,6 +185,7 @@ struct c4iw_rdev {
atomic_t wr_log_idx; atomic_t wr_log_idx;
struct wr_log_entry *wr_log; struct wr_log_entry *wr_log;
int wr_log_size; int wr_log_size;
struct workqueue_struct *free_workq;
}; };
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
...@@ -480,6 +483,8 @@ struct c4iw_qp { ...@@ -480,6 +483,8 @@ struct c4iw_qp {
wait_queue_head_t wait; wait_queue_head_t wait;
struct timer_list timer; struct timer_list timer;
int sq_sig_all; int sq_sig_all;
struct work_struct free_work;
struct c4iw_ucontext *ucontext;
}; };
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
...@@ -493,6 +498,7 @@ struct c4iw_ucontext { ...@@ -493,6 +498,7 @@ struct c4iw_ucontext {
u32 key; u32 key;
spinlock_t mmap_lock; spinlock_t mmap_lock;
struct list_head mmaps; struct list_head mmaps;
struct kref kref;
}; };
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
...@@ -500,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) ...@@ -500,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext); return container_of(c, struct c4iw_ucontext, ibucontext);
} }
void _c4iw_free_ucontext(struct kref *kref);
static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
{
kref_put(&ucontext->kref, _c4iw_free_ucontext);
}
static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
{
kref_get(&ucontext->kref);
}
struct c4iw_mm_entry { struct c4iw_mm_entry {
struct list_head entry; struct list_head entry;
u64 addr; u64 addr;
......
...@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, ...@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
return -ENOSYS; return -ENOSYS;
} }
static int c4iw_dealloc_ucontext(struct ib_ucontext *context) void _c4iw_free_ucontext(struct kref *kref)
{ {
struct c4iw_dev *rhp = to_c4iw_dev(context->device); struct c4iw_ucontext *ucontext;
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp; struct c4iw_mm_entry *mm, *tmp;
PDBG("%s context %p\n", __func__, context); ucontext = container_of(kref, struct c4iw_ucontext, kref);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
PDBG("%s ucontext %p\n", __func__, ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm); kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext); kfree(ucontext);
}
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
PDBG("%s context %p\n", __func__, context);
c4iw_put_ucontext(ucontext);
return 0; return 0;
} }
...@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, ...@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps); INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock); spin_lock_init(&context->mmap_lock);
kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++) if (!warned++)
......
...@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) ...@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
return 0; return 0;
} }
static void _free_qp(struct kref *kref) static void free_qp_work(struct work_struct *work)
{
struct c4iw_ucontext *ucontext;
struct c4iw_qp *qhp;
struct c4iw_dev *rhp;
qhp = container_of(work, struct c4iw_qp, free_work);
ucontext = qhp->ucontext;
rhp = qhp->rhp;
PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
if (ucontext)
c4iw_put_ucontext(ucontext);
kfree(qhp);
}
static void queue_qp_free(struct kref *kref)
{ {
struct c4iw_qp *qhp; struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref); qhp = container_of(kref, struct c4iw_qp, kref);
PDBG("%s qhp %p\n", __func__, qhp); PDBG("%s qhp %p\n", __func__, qhp);
kfree(qhp); queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
} }
void c4iw_qp_add_ref(struct ib_qp *qp) void c4iw_qp_add_ref(struct ib_qp *qp)
...@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp) ...@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
void c4iw_qp_rem_ref(struct ib_qp *qp) void c4iw_qp_rem_ref(struct ib_qp *qp)
{ {
PDBG("%s ib_qp %p\n", __func__, qp); PDBG("%s ib_qp %p\n", __func__, qp);
kref_put(&to_c4iw_qp(qp)->kref, _free_qp); kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
} }
static void add_to_fc_list(struct list_head *head, struct list_head *entry) static void add_to_fc_list(struct list_head *head, struct list_head *entry)
...@@ -1706,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) ...@@ -1706,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
struct c4iw_dev *rhp; struct c4iw_dev *rhp;
struct c4iw_qp *qhp; struct c4iw_qp *qhp;
struct c4iw_qp_attributes attrs; struct c4iw_qp_attributes attrs;
struct c4iw_ucontext *ucontext;
qhp = to_c4iw_qp(ib_qp); qhp = to_c4iw_qp(ib_qp);
rhp = qhp->rhp; rhp = qhp->rhp;
...@@ -1726,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) ...@@ -1726,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
spin_unlock_irq(&rhp->lock); spin_unlock_irq(&rhp->lock);
free_ird(rhp, qhp->attr.max_ird); free_ird(rhp, qhp->attr.max_ird);
ucontext = ib_qp->uobject ?
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
c4iw_qp_rem_ref(ib_qp); c4iw_qp_rem_ref(ib_qp);
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
...@@ -1829,6 +1842,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, ...@@ -1829,6 +1842,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
mutex_init(&qhp->mutex); mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait); init_waitqueue_head(&qhp->wait);
kref_init(&qhp->kref); kref_init(&qhp->kref);
INIT_WORK(&qhp->free_work, free_qp_work);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret) if (ret)
...@@ -1915,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, ...@@ -1915,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ma_sync_key_mm->len = PAGE_SIZE; ma_sync_key_mm->len = PAGE_SIZE;
insert_mmap(ucontext, ma_sync_key_mm); insert_mmap(ucontext, ma_sync_key_mm);
} }
c4iw_get_ucontext(ucontext);
qhp->ucontext = ucontext;
} }
qhp->ibqp.qp_num = qhp->wq.sq.qid; qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer)); init_timer(&(qhp->timer));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册