提交 4a451378 编写于 作者: P Pavel Begunkov 提交者: Xie XiuQi

io_uring: cancel only requests of current task

stable inclusion
from stable-5.10.4
commit a773dea1a9f2e55cc1c5d145d238630d7d69609a
bugzilla: 46903

--------------------------------

[ Upstream commit df9923f9 ]

io_uring_cancel_files() cancels all requests that match files, regardless
of task. There is no real need for that; cancel only requests of the
specified task. That also handles the SQPOLL case, as it already changes
task to it.
Signed-off-by: NPavel Begunkov <asml.silence@gmail.com>
Signed-off-by: NJens Axboe <axboe@kernel.dk>
Signed-off-by: NSasha Levin <sashal@kernel.org>
Signed-off-by: NChen Jun <chenjun102@huawei.com>
Acked-by: NXie XiuQi <xiexiuqi@huawei.com>
上级 59438828
......@@ -8421,14 +8421,6 @@ static int io_uring_release(struct inode *inode, struct file *file)
return 0;
}
/*
 * io-wq cancellation predicate: decide whether @work should be cancelled
 * for the files_struct passed in @data.
 *
 * A NULL @data means "match everything" (cancel all work items).
 * Otherwise the work item matches only if it holds a files reference
 * (IO_WQ_WORK_FILES) and that reference is the same files_struct.
 */
static bool io_wq_files_match(struct io_wq_work *work, void *data)
{
	struct files_struct *target = data;

	/* No target files given: every work item is a match. */
	if (!target)
		return true;

	/* Match only work pinning exactly this files_struct. */
	return (work->flags & IO_WQ_WORK_FILES) &&
	       work->identity->files == target;
}
/*
* Returns true if 'preq' is the link parent of 'req'
*/
......@@ -8566,21 +8558,20 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
* Returns true if we found and killed one or more files pinning requests
*/
static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
struct task_struct *task,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return false;
/* cancel all at once, should be faster than doing it one by one*/
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
while (!list_empty_careful(&ctx->inflight_list)) {
struct io_kiocb *cancel_req = NULL, *req;
DEFINE_WAIT(wait);
spin_lock_irq(&ctx->inflight_lock);
list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
if (req->task == task &&
(req->work.flags & IO_WQ_WORK_FILES) &&
req->work.identity->files != files)
continue;
/* req is being completed, ignore */
......@@ -8623,7 +8614,7 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
{
bool ret;
ret = io_uring_cancel_files(ctx, files);
ret = io_uring_cancel_files(ctx, task, files);
if (!files) {
enum io_wq_cancel cret;
......@@ -8662,11 +8653,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
io_sq_thread_park(ctx->sq_data);
}
if (files)
io_cancel_defer_files(ctx, NULL, files);
else
io_cancel_defer_files(ctx, task, NULL);
io_cancel_defer_files(ctx, task, files);
io_cqring_overflow_flush(ctx, true, task, files);
while (__io_uring_cancel_task_requests(ctx, task, files)) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册