提交 42f36814 编写于 作者: J Jens Axboe 提交者: Shile Zhang

io_uring: make io_cqring_events() take 'ctx' as argument

commit 84f97dc2333c626979bb547fce343a1003544dcc upstream.

The rings can be derived from the ctx, and we need the ctx there for
a future change.

No functional changes in this patch.
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
上级 d4be78c6
...@@ -865,8 +865,10 @@ static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr) ...@@ -865,8 +865,10 @@ static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
} }
} }
static unsigned io_cqring_events(struct io_rings *rings) static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{ {
struct io_rings *rings = ctx->rings;
/* See comment at the top of this file */ /* See comment at the top of this file */
smp_rmb(); smp_rmb();
return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head); return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
...@@ -1022,7 +1024,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, ...@@ -1022,7 +1024,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
* If we do, we can potentially be spinning for commands that * If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error). * already triggered a CQE (eg in error).
*/ */
if (io_cqring_events(ctx->rings)) if (io_cqring_events(ctx))
break; break;
/* /*
...@@ -3076,7 +3078,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq) ...@@ -3076,7 +3078,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
* started waiting. For timeouts, we always want to return to userspace, * started waiting. For timeouts, we always want to return to userspace,
* regardless of event count. * regardless of event count.
*/ */
return io_cqring_events(ctx->rings) >= iowq->to_wait || return io_cqring_events(ctx) >= iowq->to_wait ||
atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
} }
...@@ -3111,7 +3113,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -3111,7 +3113,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
struct io_rings *rings = ctx->rings; struct io_rings *rings = ctx->rings;
int ret = 0; int ret = 0;
if (io_cqring_events(rings) >= min_events) if (io_cqring_events(ctx) >= min_events)
return 0; return 0;
if (sig) { if (sig) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册