diff --git a/fs/io_uring.c b/fs/io_uring.c
index 437e69ff501b8961b1bbf15d31cb1a51be14d4c3..24872dc2752cff508ec6de9817b476a883ce504c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -678,6 +678,13 @@ static void io_put_req(struct io_kiocb *req)
 	io_free_req(req);
 }
 
+static unsigned io_cqring_events(struct io_cq_ring *ring)
+{
+	/* See comment at the top of this file */
+	smp_rmb();
+	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -817,6 +824,14 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 	do {
 		int tmin = 0;
 
+		/*
+		 * Don't enter poll loop if we already have events pending.
+		 * If we do, we can potentially be spinning for commands that
+		 * already triggered a CQE (eg in error).
+		 */
+		if (io_cqring_events(ctx->cq_ring))
+			break;
+
 		/*
 		 * If a submit got punted to a workqueue, we can have the
 		 * application entering polling for a command before it gets
@@ -2448,13 +2463,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 	return submit;
 }
 
-static unsigned io_cqring_events(struct io_cq_ring *ring)
-{
-	/* See comment at the top of this file */
-	smp_rmb();
-	return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
-}
-
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.