提交 90026541 编写于 作者: J Jens Axboe 提交者: Cheng Jian

io_uring: correct poll cancel and linked timeout expiration completion

mainline inclusion
from mainline-5.5-rc1
commit b0dd8a41
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA
---------------------------

Currently a poll request fills a completion entry of 0, even if it got
cancelled. This is odd, and it makes it harder to support with chains.
Ensure that it returns -ECANCELED in the completions events if it got
cancelled, and furthermore ensure that the linked timeout that triggered
it completes with -ETIME if we did indeed trigger the completions
through a timeout.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
上级 edbc5fb7
...@@ -2065,11 +2065,14 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -2065,11 +2065,14 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0; return 0;
} }
static void io_poll_complete(struct io_kiocb *req, __poll_t mask) static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
req->poll.done = true; req->poll.done = true;
if (error)
io_cqring_fill_event(req, error);
else
io_cqring_fill_event(req, mangle_poll(mask)); io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx); io_commit_cqring(ctx);
} }
...@@ -2083,11 +2086,16 @@ static void io_poll_complete_work(struct io_wq_work **workptr) ...@@ -2083,11 +2086,16 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt = NULL; struct io_kiocb *nxt = NULL;
__poll_t mask = 0; __poll_t mask = 0;
int ret = 0;
if (work->flags & IO_WQ_WORK_CANCEL) if (work->flags & IO_WQ_WORK_CANCEL) {
WRITE_ONCE(poll->canceled, true); WRITE_ONCE(poll->canceled, true);
ret = -ECANCELED;
} else if (READ_ONCE(poll->canceled)) {
ret = -ECANCELED;
}
if (!READ_ONCE(poll->canceled)) if (ret != -ECANCELED)
mask = vfs_poll(poll->file, &pt) & poll->events; mask = vfs_poll(poll->file, &pt) & poll->events;
/* /*
...@@ -2098,13 +2106,13 @@ static void io_poll_complete_work(struct io_wq_work **workptr) ...@@ -2098,13 +2106,13 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
* avoid further branches in the fast path. * avoid further branches in the fast path.
*/ */
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
if (!mask && !READ_ONCE(poll->canceled)) { if (!mask && ret != -ECANCELED) {
add_wait_queue(poll->head, &poll->wait); add_wait_queue(poll->head, &poll->wait);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
return; return;
} }
io_poll_remove_req(req); io_poll_remove_req(req);
io_poll_complete(req, mask); io_poll_complete(req, mask, ret);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
...@@ -2138,7 +2146,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, ...@@ -2138,7 +2146,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
*/ */
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) { if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
io_poll_remove_req(req); io_poll_remove_req(req);
io_poll_complete(req, mask); io_poll_complete(req, mask, 0);
req->flags |= REQ_F_COMP_LOCKED; req->flags |= REQ_F_COMP_LOCKED;
io_put_req(req); io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
...@@ -2250,7 +2258,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -2250,7 +2258,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} }
if (mask) { /* no async, we'd stolen it */ if (mask) { /* no async, we'd stolen it */
ipt.error = 0; ipt.error = 0;
io_poll_complete(req, mask); io_poll_complete(req, mask, 0);
} }
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
...@@ -2502,7 +2510,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) ...@@ -2502,7 +2510,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
static void io_async_find_and_cancel(struct io_ring_ctx *ctx, static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
struct io_kiocb *req, __u64 sqe_addr, struct io_kiocb *req, __u64 sqe_addr,
struct io_kiocb **nxt) struct io_kiocb **nxt, int success_ret)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret;
...@@ -2519,6 +2527,8 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx, ...@@ -2519,6 +2527,8 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
goto done; goto done;
ret = io_poll_cancel(ctx, sqe_addr); ret = io_poll_cancel(ctx, sqe_addr);
done: done:
if (!ret)
ret = success_ret;
io_cqring_fill_event(req, ret); io_cqring_fill_event(req, ret);
io_commit_cqring(ctx); io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
...@@ -2540,7 +2550,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -2540,7 +2550,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe->cancel_flags) sqe->cancel_flags)
return -EINVAL; return -EINVAL;
io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt); io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
return 0; return 0;
} }
...@@ -2830,7 +2840,8 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) ...@@ -2830,7 +2840,8 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) { if (prev) {
io_async_find_and_cancel(ctx, req, prev->user_data, NULL); io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
-ETIME);
io_put_req(prev); io_put_req(prev);
} else { } else {
io_cqring_add_event(req, -ETIME); io_cqring_add_event(req, -ETIME);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册