Commit f0127254 authored by Jens Axboe

io-wq: ensure all pending work is canceled on exit

If we race on shutting down the io-wq, then we should ensure that any
work that was queued after the workers shut down is canceled. Harden the
add-work check a bit too, checking for IO_WQ_BIT_EXIT and canceling if
it's set.

Add a WARN_ON() for any work still pending before we kill the io-wq context.

Reported-by: syzbot+91b4b56ead187d35c9d3@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent e4b4a13f
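The hardening described in the message comes down to one pattern at enqueue time: once the workqueue is marked as exiting, new work is never queued, it is completed as canceled instead. Below is a minimal, self-contained sketch of that pattern. The type and flag names are hypothetical simplifications for illustration only, not the actual fs/io-wq.c definitions; the real change is in the io_wqe_enqueue() hunk further down.

/* Sketch only: a simplified model of "cancel instead of enqueue on exit". */
#include <stdatomic.h>
#include <stdbool.h>

enum { WQ_WORK_CANCEL = 1 << 0 };	/* hypothetical flag bit */

struct wq_work {
	unsigned int flags;
	void (*do_work)(struct wq_work *work);
};

struct wq {
	atomic_bool exiting;		/* stands in for IO_WQ_BIT_EXIT */
};

/*
 * If the queue is already shutting down, run the work once with the
 * cancel flag set so its completion path still executes; otherwise
 * take the normal enqueue path.
 */
static void wq_enqueue(struct wq *wq, struct wq_work *work)
{
	if (atomic_load(&wq->exiting)) {
		work->flags |= WQ_WORK_CANCEL;
		work->do_work(work);
		return;
	}
	/* ... normal enqueue path elided ... */
}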
fs/io-wq.c
@@ -129,6 +129,17 @@ struct io_wq {
 
 static enum cpuhp_state io_wq_online;
 
+struct io_cb_cancel_data {
+	work_cancel_fn *fn;
+	void *data;
+	int nr_running;
+	int nr_pending;
+	bool cancel_all;
+};
+
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match);
+
 static bool io_worker_get(struct io_worker *worker)
 {
 	return refcount_inc_not_zero(&worker->ref);
@@ -713,6 +724,23 @@ static void io_wq_check_workers(struct io_wq *wq)
 	}
 }
 
+static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+{
+	return true;
+}
+
+static void io_wq_cancel_pending(struct io_wq *wq)
+{
+	struct io_cb_cancel_data match = {
+		.fn		= io_wq_work_match_all,
+		.cancel_all	= true,
+	};
+	int node;
+
+	for_each_node(node)
+		io_wqe_cancel_pending_work(wq->wqes[node], &match);
+}
+
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
@@ -748,6 +776,8 @@ static int io_wq_manager(void *data)
 	/* we might not ever have created any workers */
 	if (atomic_read(&wq->worker_refs))
 		wait_for_completion(&wq->worker_done);
+
+	io_wq_cancel_pending(wq);
 	complete(&wq->exited);
 	do_exit(0);
 }
@@ -809,7 +839,8 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 	unsigned long flags;
 
 	/* Can only happen if manager creation fails after exec */
-	if (unlikely(io_wq_fork_manager(wqe->wq))) {
+	if (io_wq_fork_manager(wqe->wq) ||
+	    test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
 		work->flags |= IO_WQ_WORK_CANCEL;
 		wqe->wq->do_work(work);
 		return;
@@ -845,14 +876,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
-struct io_cb_cancel_data {
-	work_cancel_fn *fn;
-	void *data;
-	int nr_running;
-	int nr_pending;
-	bool cancel_all;
-};
-
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_cb_cancel_data *match = data;
@@ -1086,6 +1109,7 @@ static void io_wq_destroy(struct io_wq *wq)
 		struct io_wqe *wqe = wq->wqes[node];
 
 		list_del_init(&wqe->wait.entry);
+		WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
 		kfree(wqe);
 	}
 	spin_unlock_irq(&wq->hash->wait.lock);
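The other half of the change is the match-all cancellation that io_wq_manager() now runs once the last worker has exited. As a rough, self-contained sketch of that idea, with a hypothetical singly linked list and work type standing in for the kernel's io_wq_work_list and the internals of io_wqe_cancel_pending_work():

/* Sketch only: cancel every pending item that a match callback accepts. */
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
	void (*do_work)(struct work_item *w, bool canceled);
};

typedef bool (*work_match_fn)(struct work_item *w, void *data);

/* Accept everything, mirroring io_wq_work_match_all() above. */
static bool match_all(struct work_item *w, void *data)
{
	(void)w;
	(void)data;
	return true;
}

/*
 * Unlink each matching item from the pending list and run it in
 * canceled mode, so nothing that was queued is silently dropped.
 */
static void cancel_pending(struct work_item **head, work_match_fn fn, void *data)
{
	struct work_item **pp = head;

	while (*pp) {
		struct work_item *w = *pp;

		if (fn(w, data)) {
			*pp = w->next;
			w->do_work(w, true);
		} else {
			pp = &w->next;
		}
	}
}

A caller would invoke cancel_pending(&pending_head, match_all, NULL) once it knows no workers remain, which is roughly what io_wq_cancel_pending() does for each node. Running each item in canceled mode, rather than simply dropping it, keeps the work's completion path responsible for releasing references and notifying the submitter.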