提交 e0eb71dc 编写于 作者: P Pavel Begunkov 提交者: Jens Axboe

io_uring: don't return from io_drain_req()

Never return from io_drain_req() but punt to tw if we've got there but
it's a false positive and we shouldn't actually drain.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/93583cee51b8783706b76c73196c155b28d9e762.1633107393.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
上级 22b2ca31
...@@ -6428,46 +6428,39 @@ static u32 io_get_sequence(struct io_kiocb *req) ...@@ -6428,46 +6428,39 @@ static u32 io_get_sequence(struct io_kiocb *req)
return seq; return seq;
} }
static bool io_drain_req(struct io_kiocb *req) static void io_drain_req(struct io_kiocb *req)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_defer_entry *de; struct io_defer_entry *de;
int ret; int ret;
u32 seq; u32 seq = io_get_sequence(req);
/* Still need defer if there is pending req in defer list. */ /* Still need defer if there is pending req in defer list. */
if (likely(list_empty_careful(&ctx->defer_list) &&
!(req->flags & REQ_F_IO_DRAIN))) {
ctx->drain_active = false;
return false;
}
seq = io_get_sequence(req);
/* Still a chance to pass the sequence check */
if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
queue:
ctx->drain_active = false; ctx->drain_active = false;
return false; io_req_task_queue(req);
return;
} }
ret = io_req_prep_async(req); ret = io_req_prep_async(req);
if (ret) if (ret) {
goto fail; fail:
io_req_complete_failed(req, ret);
return;
}
io_prep_async_link(req); io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL); de = kmalloc(sizeof(*de), GFP_KERNEL);
if (!de) { if (!de) {
ret = -ENOMEM; ret = -ENOMEM;
fail: goto fail;
io_req_complete_failed(req, ret);
return true;
} }
spin_lock(&ctx->completion_lock); spin_lock(&ctx->completion_lock);
if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
spin_unlock(&ctx->completion_lock); spin_unlock(&ctx->completion_lock);
kfree(de); kfree(de);
io_queue_async_work(req, NULL); goto queue;
ctx->drain_active = false;
return true;
} }
trace_io_uring_defer(ctx, req, req->user_data); trace_io_uring_defer(ctx, req, req->user_data);
...@@ -6475,7 +6468,6 @@ static bool io_drain_req(struct io_kiocb *req) ...@@ -6475,7 +6468,6 @@ static bool io_drain_req(struct io_kiocb *req)
de->seq = seq; de->seq = seq;
list_add_tail(&de->list, &ctx->defer_list); list_add_tail(&de->list, &ctx->defer_list);
spin_unlock(&ctx->completion_lock); spin_unlock(&ctx->completion_lock);
return true;
} }
static void io_clean_op(struct io_kiocb *req) static void io_clean_op(struct io_kiocb *req)
...@@ -6931,8 +6923,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req) ...@@ -6931,8 +6923,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
{ {
if (req->flags & REQ_F_FAIL) { if (req->flags & REQ_F_FAIL) {
io_req_complete_fail_submit(req); io_req_complete_fail_submit(req);
} else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { } else if (unlikely(req->ctx->drain_active)) {
return; io_drain_req(req);
} else { } else {
int ret = io_req_prep_async(req); int ret = io_req_prep_async(req);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册