提交 9c90c6a4 编写于 作者: Jens Axboe 提交者: Xiaoguang Wang

io_uring: async task poll trigger cleanup

to #28736503

commit 310672552f4aea2ad50704711aa3cdd45f5441e9 upstream

If the request is still hashed in io_async_task_func(), then it cannot
have been canceled and it's pointless to check. So save that check.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
上级 1d6d088b
......@@ -4114,7 +4114,7 @@ static void io_async_task_func(struct callback_head *cb)
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct async_poll *apoll = req->apoll;
struct io_ring_ctx *ctx = req->ctx;
bool canceled;
bool canceled = false;
trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
......@@ -4123,34 +4123,33 @@ static void io_async_task_func(struct callback_head *cb)
return;
}
if (hash_hashed(&req->hash_node))
/* If req is still hashed, it cannot have been canceled. Don't check. */
if (hash_hashed(&req->hash_node)) {
hash_del(&req->hash_node);
} else {
canceled = READ_ONCE(apoll->poll.canceled);
if (canceled) {
io_cqring_fill_event(req, -ECANCELED);
io_commit_cqring(ctx);
}
}
spin_unlock_irq(&ctx->completion_lock);
/* restore ->work in case we need to retry again */
memcpy(&req->work, &apoll->work, sizeof(req->work));
if (canceled) {
kfree(apoll);
io_cqring_ev_posted(ctx);
req_set_fail_links(req);
io_double_put_req(req);
return;
}
if (!canceled) {
__set_current_state(TASK_RUNNING);
mutex_lock(&ctx->uring_lock);
__io_queue_sqe(req, NULL);
mutex_unlock(&ctx->uring_lock);
kfree(apoll);
} else {
io_cqring_ev_posted(ctx);
req_set_fail_links(req);
io_double_put_req(req);
}
}
static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册