Commit 8da91ea8 authored by Tom Tucker

svcrdma: Move destroy to kernel thread

Some providers may wait while destroying adapter resources.
Since it is possible that the last reference is put on the
dto_tasklet, the actual destroy must be scheduled as a work item.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Parent 47698e08
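
The fix uses the common kernel pattern of deferring a possibly sleeping teardown out of atomic context: the final-put path only queues a work item, and the work handler, running in process context on a kernel worker thread, performs the actual destroy. Below is a minimal sketch of that pattern using a hypothetical my_res structure (not part of the patch); the real change, applied to struct svcxprt_rdma, follows in the diff.

#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical resource whose teardown may sleep. */
struct my_res {
	struct work_struct work;
	/* ... adapter resources ... */
};

/* Runs in process context on a kernel worker thread, so it may sleep. */
static void my_res_free_work(struct work_struct *work)
{
	struct my_res *res = container_of(work, struct my_res, work);

	/* Sleeping teardown of adapter resources would go here. */
	kfree(res);
}

/*
 * May be reached from softirq context (e.g. a tasklet), where sleeping
 * is forbidden, so only schedule the real destroy as a work item.
 */
static void my_res_free(struct my_res *res)
{
	INIT_WORK(&res->work, my_res_free_work);
	schedule_work(&res->work);
}
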
@@ -124,6 +124,7 @@ struct svcxprt_rdma {
 	struct list_head     sc_dto_q;		/* DTO tasklet I/O pending Q */
 	struct list_head     sc_read_complete_q;
 	spinlock_t	     sc_read_complete_lock;
+	struct work_struct   sc_work;
 };
 /* sc_flags */
 #define RDMAXPRT_RQ_PENDING	1
@@ -963,12 +963,15 @@ static void svc_rdma_detach(struct svc_xprt *xprt)
 	rdma_destroy_id(rdma->sc_cm_id);
 }
-static void svc_rdma_free(struct svc_xprt *xprt)
+static void __svc_rdma_free(struct work_struct *work)
 {
-	struct svcxprt_rdma *rdma = (struct svcxprt_rdma *)xprt;
+	struct svcxprt_rdma *rdma =
+		container_of(work, struct svcxprt_rdma, sc_work);
 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
 	/* We should only be called from kref_put */
-	BUG_ON(atomic_read(&xprt->xpt_ref.refcount) != 0);
+	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
 	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
 		ib_destroy_cq(rdma->sc_sq_cq);
@@ -985,6 +988,14 @@ static void svc_rdma_free(struct svc_xprt *xprt)
 	kfree(rdma);
 }
+static void svc_rdma_free(struct svc_xprt *xprt)
+{
+	struct svcxprt_rdma *rdma =
+		container_of(xprt, struct svcxprt_rdma, sc_xprt);
+	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
+	schedule_work(&rdma->sc_work);
+}
 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 {
 	struct svcxprt_rdma *rdma =