提交 38954954 编写于 作者: J Jens Axboe 提交者: Xiaoguang Wang

io_uring: use io-wq manager as backup task if task is exiting

to #28170604

commit aa96bf8a9ee33457b7e3ea43e97dfa1e3a15ab20 upstream

If the original task is (or has) exited, then the task work will not get
queued properly. Allow for using the io-wq manager task to queue this
work for execution, and ensure that the io-wq manager notices and runs
this work if woken up (or exiting).
Reported-by: Dan Melnic <dmm@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
上级 e32c3252
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/rculist_nulls.h> #include <linux/rculist_nulls.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/fs_struct.h> #include <linux/fs_struct.h>
#include <linux/task_work.h>
#include "io-wq.h" #include "io-wq.h"
...@@ -717,6 +718,9 @@ static int io_wq_manager(void *data) ...@@ -717,6 +718,9 @@ static int io_wq_manager(void *data)
complete(&wq->done); complete(&wq->done);
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
if (current->task_works)
task_work_run();
for_each_node(node) { for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node]; struct io_wqe *wqe = wq->wqes[node];
bool fork_worker[2] = { false, false }; bool fork_worker[2] = { false, false };
...@@ -739,6 +743,9 @@ static int io_wq_manager(void *data) ...@@ -739,6 +743,9 @@ static int io_wq_manager(void *data)
schedule_timeout(HZ); schedule_timeout(HZ);
} }
if (current->task_works)
task_work_run();
return 0; return 0;
err: err:
set_bit(IO_WQ_BIT_ERROR, &wq->state); set_bit(IO_WQ_BIT_ERROR, &wq->state);
...@@ -1125,3 +1132,8 @@ void io_wq_destroy(struct io_wq *wq) ...@@ -1125,3 +1132,8 @@ void io_wq_destroy(struct io_wq *wq)
if (refcount_dec_and_test(&wq->use_refs)) if (refcount_dec_and_test(&wq->use_refs))
__io_wq_destroy(wq); __io_wq_destroy(wq);
} }
/*
 * Return the io-wq manager kthread for this io_wq instance.
 *
 * Callers use this as a backup target for task_work_add() when the
 * original submitting task is exiting and can no longer accept task
 * work (see the __io_async_wake() fallback in this commit): the
 * manager runs pending task_works in its loop and once more before it
 * exits, so work queued here is guaranteed to execute.
 */
struct task_struct *io_wq_get_task(struct io_wq *wq)
{
return wq->manager;
}
...@@ -136,6 +136,8 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *); ...@@ -136,6 +136,8 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
void *data); void *data);
struct task_struct *io_wq_get_task(struct io_wq *wq);
#if defined(CONFIG_IO_WQ) #if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *); extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *); extern void io_wq_worker_running(struct task_struct *);
......
...@@ -4120,6 +4120,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, ...@@ -4120,6 +4120,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
__poll_t mask, task_work_func_t func) __poll_t mask, task_work_func_t func)
{ {
struct task_struct *tsk; struct task_struct *tsk;
int ret;
/* for instances that support it check for an event match first: */ /* for instances that support it check for an event match first: */
if (mask && !(mask & poll->events)) if (mask && !(mask & poll->events))
...@@ -4133,11 +4134,15 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, ...@@ -4133,11 +4134,15 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
req->result = mask; req->result = mask;
init_task_work(&req->task_work, func); init_task_work(&req->task_work, func);
/* /*
* If this fails, then the task is exiting. If that is the case, then * If this fails, then the task is exiting. Punt to one of the io-wq
* the exit check will ultimately cancel these work items. Hence we * threads to ensure the work gets run, we can't always rely on exit
* don't need to check here and handle it specifically. * cancelation taking care of this.
*/ */
task_work_add(tsk, &req->task_work, true); ret = task_work_add(tsk, &req->task_work, true);
if (unlikely(ret)) {
tsk = io_wq_get_task(req->ctx->io_wq);
task_work_add(tsk, &req->task_work, true);
}
wake_up_process(tsk); wake_up_process(tsk);
return 1; return 1;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册