Commit 7f7487a9 authored by Pavel Begunkov, committed by Xiaoguang Wang

io_uring: separate DRAIN flushing into a cold path

to #28736503

commit 0451894522108d6c72934aff6ef89023743a9ed4 upstream

io_commit_cqring() assembly doesn't look good with the extra code handling
drained requests. IOSQE_IO_DRAIN is slow and its use in a hot path is
discouraged, so try to minimise its impact by moving the drain flushing into
a helper and doing only a fast check inline.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Parent 1ddc14b7
@@ -979,19 +979,6 @@ static inline bool req_need_defer(struct io_kiocb *req)
 	return false;
 }
 
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-{
-	struct io_kiocb *req;
-
-	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
-	if (req && !req_need_defer(req)) {
-		list_del_init(&req->list);
-		return req;
-	}
-
-	return NULL;
-}
-
 static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 {
 	struct io_kiocb *req;
@@ -1124,6 +1111,19 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx)
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
+static void __io_queue_deferred(struct io_ring_ctx *ctx)
+{
+	do {
+		struct io_kiocb *req = list_first_entry(&ctx->defer_list,
+							struct io_kiocb, list);
+
+		if (req_need_defer(req))
+			break;
+		list_del_init(&req->list);
+		io_queue_async_work(req);
+	} while (!list_empty(&ctx->defer_list));
+}
+
 static void io_commit_cqring(struct io_ring_ctx *ctx)
 {
 	struct io_kiocb *req;
@@ -1133,8 +1133,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 
 	__io_commit_cqring(ctx);
 
-	while ((req = io_get_deferred_req(ctx)) != NULL)
-		io_queue_async_work(req);
+	if (unlikely(!list_empty(&ctx->defer_list)))
+		__io_queue_deferred(ctx);
 }
 
 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
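
For illustration only, here is a minimal userspace C sketch of the same fast-check / cold-helper pattern: the hot function pays only a cheap emptiness test behind unlikely(), and the rarely-taken drain handling lives in an out-of-line helper. All names (struct ring, commit_cqring, queue_deferred, still_deferred) are hypothetical stand-ins, not the kernel code above.

/*
 * Sketch of the fast-check / cold-helper pattern (hypothetical names).
 * Keep the hot path to one predictable branch; drain in a noinline helper.
 */
#include <stdio.h>
#include <stdbool.h>

#ifdef __GNUC__
#define unlikely(x)	__builtin_expect(!!(x), 0)
#else
#define unlikely(x)	(x)
#endif

struct request {
	int id;
	bool still_deferred;		/* stand-in for req_need_defer() */
	struct request *next;
};

struct ring {
	struct request *defer_list;	/* usually empty */
};

/* Cold path: only reached when the defer list is non-empty. */
static __attribute__((noinline)) void queue_deferred(struct ring *ring)
{
	do {
		struct request *req = ring->defer_list;

		if (req->still_deferred)
			break;			/* head must stay deferred */
		ring->defer_list = req->next;
		printf("queueing deferred request %d\n", req->id);
	} while (ring->defer_list);
}

/* Hot path: the common no-drain case costs one emptiness check. */
static void commit_cqring(struct ring *ring)
{
	/* ... flush completions here ... */
	if (unlikely(ring->defer_list))
		queue_deferred(ring);
}

int main(void)
{
	struct request req = { .id = 1, .still_deferred = false, .next = NULL };
	struct ring ring = { .defer_list = NULL };

	commit_cqring(&ring);		/* fast path: nothing deferred */

	ring.defer_list = &req;
	commit_cqring(&ring);		/* slow path: drains the list */
	return 0;
}

Keeping the helper out of line mirrors the intent of the patch: the common case stays compact in the caller, and the drain code no longer bloats io_commit_cqring()'s assembly.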