提交 986957da 编写于 作者: V Vivek Goyal 提交者: Caspar Zhang

virtiofs: Retry request submission from worker context

task #28910367
commit a9bfd9dd3417561d06c81de04f6d6c1e0c9b3d44 upstream

If the regular request queue gets full, we currently sleep for a bit and
retry submission in the submitter's context. This assumes the submitter is
not holding any spin lock. But this assumption is not true for background
requests: for background requests, we are called with fc->bg_lock held.

This can lead to deadlock where one thread is trying submission with
fc->bg_lock held while request completion thread has called
fuse_request_end() which tries to acquire fc->bg_lock and gets blocked. As
request completion thread gets blocked, it does not make further progress
and that means queue does not get empty and submitter can't submit more
requests.

To solve this issue, retry submission with the help of a worker, instead of
retrying in the submitter's context. We already do this for hiprio/forget
requests.
Reported-by: Chirantan Ekbote <chirantan@chromium.org>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
(cherry picked from commit a9bfd9dd3417561d06c81de04f6d6c1e0c9b3d44)
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
上级 ab040ec8
......@@ -54,6 +54,9 @@ struct virtio_fs_forget {
struct list_head list;
};
/*
 * Forward declaration: defined later in this file. 'in_flight' tells the
 * enqueue path whether this request has already been counted as in-flight
 * (i.e. it is a retry from the dispatch worker), so the in-flight counter
 * is not incremented a second time.
 */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
struct fuse_req *req, bool in_flight);
static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
struct virtio_fs *fs = vq->vdev->priv;
......@@ -255,6 +258,7 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
dispatch_work.work);
struct fuse_conn *fc = fsvq->fud->fc;
int ret;
pr_debug("virtio-fs: worker %s called.\n", __func__);
while (1) {
......@@ -263,13 +267,45 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
list);
if (!req) {
spin_unlock(&fsvq->lock);
return;
break;
}
list_del_init(&req->list);
spin_unlock(&fsvq->lock);
fuse_request_end(fc, req);
}
/*
 * Dispatch pending requests: drain fsvq->queued_reqs, which holds
 * requests whose earlier submission failed because the virtqueue was
 * full. Retrying here (worker context) avoids retrying in the
 * submitter's context, where locks may be held.
 */
while (1) {
/* Pop the next queued request under the queue lock. */
spin_lock(&fsvq->lock);
req = list_first_entry_or_null(&fsvq->queued_reqs,
struct fuse_req, list);
if (!req) {
spin_unlock(&fsvq->lock);
return;
}
list_del_init(&req->list);
spin_unlock(&fsvq->lock);
/*
 * in_flight = true: this request was already counted as
 * in-flight when it was first queued, so the enqueue path must
 * not increment the counter again.
 */
ret = virtio_fs_enqueue_req(fsvq, req, true);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOSPC) {
/*
 * Virtqueue is still full (or allocation failed):
 * requeue the request and reschedule this worker
 * to retry after a short delay.
 */
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->queued_reqs);
schedule_delayed_work(&fsvq->dispatch_work,
msecs_to_jiffies(1));
spin_unlock(&fsvq->lock);
return;
}
/*
 * Hard failure: drop the in-flight count taken at queueing
 * time and complete the request with the error.
 */
req->out.h.error = ret;
spin_lock(&fsvq->lock);
dec_in_flight_req(fsvq);
spin_unlock(&fsvq->lock);
pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
ret);
fuse_request_end(fc, req);
}
}
}
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
......@@ -825,7 +861,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
/* Add a request to a virtqueue and kick the device */
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
struct fuse_req *req)
struct fuse_req *req, bool in_flight)
{
/* requests need at least 4 elements */
struct scatterlist *stack_sgs[6];
......@@ -904,7 +940,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
/* matches barrier in request_wait_answer() */
smp_mb__after_atomic();
inc_in_flight_req(fsvq);
if (!in_flight)
inc_in_flight_req(fsvq);
notify = virtqueue_kick_prepare(vq);
spin_unlock(&fsvq->lock);
......@@ -950,15 +987,21 @@ __releases(fiq->lock)
req->in.h.nodeid, req->in.h.len,
fuse_len_args(req->out.numargs, req->out.args));
retry:
fsvq = &fs->vqs[queue_id];
ret = virtio_fs_enqueue_req(fsvq, req);
ret = virtio_fs_enqueue_req(fsvq, req, false);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOSPC) {
/* Virtqueue full. Retry submission */
/* TODO use completion instead of timeout */
usleep_range(20, 30);
goto retry;
/*
* Virtqueue full. Retry submission from worker
* context as we might be holding fc->bg_lock.
*/
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->queued_reqs);
inc_in_flight_req(fsvq);
schedule_delayed_work(&fsvq->dispatch_work,
msecs_to_jiffies(1));
spin_unlock(&fsvq->lock);
return;
}
req->out.h.error = ret;
pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册