From f7dc15c2e9c4e56abb5a73c076f675e69d83585b Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@kernel.dk>
Date: Tue, 19 Apr 2022 17:06:44 +0800
Subject: [PATCH] io_uring: don't keep looping for more events if we can't
 flush overflow

mainline inclusion
from mainline-v5.12-rc1
commit ca0a26511c679a797f86589894a4523db36d833e
category: bugfix
bugzilla: 186454,https://gitee.com/openeuler/kernel/issues/I5026G
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ca0a26511c679a797f86589894a4523db36d833e

--------------------------------

It doesn't make sense to wait for more events to come in, if we can't
even flush the overflow we already have to the ring. Return -EBUSY for
that condition, just like we do for attempts to submit with overflow
pending.

Cc: stable@vger.kernel.org # 5.11
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflicts:
	fs/io_uring.c

Signed-off-by: Guo Xuenan
Reviewed-by: Zhang Yi
Signed-off-by: Zheng Zengkai
---
 fs/io_uring.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 600dd0898d7e..af6a1858c791 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1713,18 +1713,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return cqe != NULL;
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 				     struct task_struct *tsk,
 				     struct files_struct *files)
 {
+	bool ret = true;
+
 	if (test_bit(0, &ctx->cq_check_overflow)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx, force, tsk, files);
+		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&ctx->uring_lock);
 	}
+
+	return ret;
 }
 
 static void __io_cqring_fill_event(struct io_kiocb *req, long res,
@@ -7051,7 +7055,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		/* if we can't even flush overflow, don't wait for more */
+		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
+			ret = -EBUSY;
+			break;
+		}
		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
 		/* make sure we run task_work before checking for signals */
--
GitLab
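
Note for readers of this backport (illustration only, not part of the patch):
with this change, an io_uring_enter() call waiting with IORING_ENTER_GETEVENTS
can now fail with -EBUSY when overflowed CQEs cannot be flushed back into the
CQ ring. A minimal userspace sketch of handling that condition follows; the
raw syscall wrapper matches the 5.11-era six-argument io_uring_enter(), while
reap_cqes() is a hypothetical helper standing in for whatever code consumes
ready CQEs to make room in the ring.

#include <errno.h>
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

/* glibc provides no io_uring_enter() wrapper; invoke the syscall directly */
static int sys_io_uring_enter(int ring_fd, unsigned to_submit,
			      unsigned min_complete, unsigned flags)
{
	return syscall(__NR_io_uring_enter, ring_fd, to_submit,
		       min_complete, flags, NULL, 0);
}

/* hypothetical: consume ready CQEs so the kernel can flush its overflow list */
extern void reap_cqes(int ring_fd);

static int wait_for_events(int ring_fd, unsigned min_complete)
{
	for (;;) {
		int ret = sys_io_uring_enter(ring_fd, 0, min_complete,
					     IORING_ENTER_GETEVENTS);
		if (ret >= 0)
			return ret;
		if (errno == EBUSY) {
			/* CQ ring is full and the overflow list could not be
			 * flushed: make room by reaping completions, retry */
			reap_cqes(ring_fd);
			continue;
		}
		if (errno != EINTR)
			return -errno;
		/* interrupted by a signal: simply retry the wait */
	}
}

The reap-then-retry loop mirrors how the submit side already treats -EBUSY
with overflow pending: the condition clears once the application frees space
in the CQ ring.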