From 2dad65762482020444f8cd5ae265d493e2350a09 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Dec 2019 08:46:33 -0700 Subject: [PATCH] io-wq: support concurrent non-blocking work to #26323588 commit 895e2ca0f693c672902191747b548bdc56f0c7de upstream. io-wq assumes that work will complete fast (and not block), so it doesn't create a new worker when work is enqueued, if we already have at least one worker running. This is done on the assumption that if work is running, then it will complete fast. Add an option to force io-wq to fork a new worker for work queued. This is signaled by setting IO_WQ_WORK_CONCURRENT on the work item. For that case, io-wq will create a new worker, even though workers are already running. Signed-off-by: Jens Axboe Signed-off-by: Joseph Qi Acked-by: Xiaoguang Wang --- fs/io-wq.c | 5 ++++- fs/io-wq.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index d766119a9d7b..171ba9409baf 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -725,6 +725,7 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct, static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) { struct io_wqe_acct *acct = io_work_get_acct(wqe, work); + int work_flags; unsigned long flags; /* @@ -739,12 +740,14 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) return; } + work_flags = work->flags; spin_lock_irqsave(&wqe->lock, flags); wq_list_add_tail(&work->list, &wqe->work_list); wqe->flags &= ~IO_WQE_FLAG_STALLED; spin_unlock_irqrestore(&wqe->lock, flags); - if (!atomic_read(&acct->nr_running)) + if ((work_flags & IO_WQ_WORK_CONCURRENT) || + !atomic_read(&acct->nr_running)) io_wqe_wake_worker(wqe, acct); } diff --git a/fs/io-wq.h b/fs/io-wq.h index 1a41aa07ace1..47cc2323e78f 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -13,6 +13,7 @@ enum { IO_WQ_WORK_INTERNAL = 64, IO_WQ_WORK_CB = 128, IO_WQ_WORK_NO_CANCEL = 256, + IO_WQ_WORK_CONCURRENT = 512, IO_WQ_HASH_SHIFT = 24, /* upper 8 
bits are used for hash key */ }; -- GitLab