提交 f7de6d08 编写于 作者: J Jens Axboe 提交者: Shile Zhang

io_uring: make ASYNC_CANCEL work with poll and timeout

commit 47f467686ec02fc07fd5c6bb34b6f6736e2884b0 upstream.

It's a little confusing that we have multiple types of command
cancellation opcodes now that we have a generic one. Make the generic
one work with POLL_ADD and TIMEOUT commands as well, that makes for an
easier to use API for the application. The fact that they currently
don't is a bit confusing.

Add a helper that takes care of it, so we can use it from both
IORING_OP_ASYNC_CANCEL and from the linked timeout cancellation.
Reported-by: Hrvoje Zeba <zeba.hrvoje@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
上级 687d02d6
...@@ -1959,6 +1959,20 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx) ...@@ -1959,6 +1959,20 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
} }
/*
 * Cancel the pending poll request whose user_data matches @sqe_addr.
 *
 * Walks ctx->cancel_list and removes the first match via
 * io_poll_remove_one().  Returns 0 if a request was cancelled,
 * -ENOENT if no poll request with that user_data is pending.
 *
 * NOTE(review): the list walk itself takes no lock, so callers must hold
 * ctx->completion_lock — both visible callers (io_poll_remove() and
 * io_async_find_and_cancel()) do; confirm any new caller does too.
 */
static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
	struct io_kiocb *req;

	list_for_each_entry(req, &ctx->cancel_list, list) {
		/* sqe_addr is the user_data of the poll request to cancel */
		if (req->user_data != sqe_addr)
			continue;

		/* only the first matching request is cancelled */
		io_poll_remove_one(req);
		return 0;
	}

	return -ENOENT;
}
/* /*
* Find a running poll command that matches one specified in sqe->addr, * Find a running poll command that matches one specified in sqe->addr,
* and remove it if found. * and remove it if found.
...@@ -1966,8 +1980,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx) ...@@ -1966,8 +1980,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *poll_req, *next; int ret;
int ret = -ENOENT;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
...@@ -1976,13 +1989,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1976,13 +1989,7 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL; return -EINVAL;
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) { ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
if (READ_ONCE(sqe->addr) == poll_req->user_data) {
io_poll_remove_one(poll_req);
ret = 0;
break;
}
}
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req, ret); io_cqring_add_event(req, ret);
...@@ -2202,6 +2209,31 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) ...@@ -2202,6 +2209,31 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
/*
 * Cancel the pending timeout whose user_data matches @user_data.
 *
 * Return values:
 *   0         timeout found and cancelled; a -ECANCELED CQE was filled for
 *             it and its reference dropped
 *   -ENOENT   no timeout with that user_data on ctx->timeout_list
 *   -EALREADY the hrtimer callback is already running; too late to cancel
 *
 * NOTE(review): callers hold ctx->completion_lock (io_timeout_remove() and
 * io_async_find_and_cancel()); this only fills the CQE — the caller is
 * responsible for io_commit_cqring()/io_cqring_ev_posted() afterwards.
 */
static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
{
	struct io_kiocb *req;
	int ret = -ENOENT;

	/* find and unlink the matching timeout before touching its timer */
	list_for_each_entry(req, &ctx->timeout_list, list) {
		if (user_data == req->user_data) {
			list_del_init(&req->list);
			ret = 0;
			break;
		}
	}

	if (ret == -ENOENT)
		return ret;

	/* hrtimer_try_to_cancel() returns -1 if the callback is running */
	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret == -1)
		return -EALREADY;

	io_cqring_fill_event(req, -ECANCELED);
	io_put_req(req);
	return 0;
}
/* /*
* Remove or update an existing timeout command * Remove or update an existing timeout command
*/ */
...@@ -2209,10 +2241,8 @@ static int io_timeout_remove(struct io_kiocb *req, ...@@ -2209,10 +2241,8 @@ static int io_timeout_remove(struct io_kiocb *req,
const struct io_uring_sqe *sqe) const struct io_uring_sqe *sqe)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *treq;
int ret = -ENOENT;
__u64 user_data;
unsigned flags; unsigned flags;
int ret;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
...@@ -2222,44 +2252,17 @@ static int io_timeout_remove(struct io_kiocb *req, ...@@ -2222,44 +2252,17 @@ static int io_timeout_remove(struct io_kiocb *req,
if (flags) if (flags)
return -EINVAL; return -EINVAL;
user_data = READ_ONCE(sqe->addr);
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_for_each_entry(treq, &ctx->timeout_list, list) { ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
if (user_data == treq->user_data) {
list_del_init(&treq->list);
ret = 0;
break;
}
}
/* didn't find timeout */
if (ret) {
fill_ev:
io_cqring_fill_event(req, ret); io_cqring_fill_event(req, ret);
io_commit_cqring(ctx); io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
if (req->flags & REQ_F_LINK) if (ret < 0 && req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK; req->flags |= REQ_F_FAIL_LINK;
io_put_req(req); io_put_req(req);
return 0; return 0;
}
ret = hrtimer_try_to_cancel(&treq->timeout.timer);
if (ret == -1) {
ret = -EBUSY;
goto fill_ev;
}
io_cqring_fill_event(req, 0);
io_cqring_fill_event(treq, -ECANCELED);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
io_put_req(treq);
io_put_req(req);
return 0;
} }
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
...@@ -2374,12 +2377,39 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr) ...@@ -2374,12 +2377,39 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
return ret; return ret;
} }
/*
 * Generic request cancellation by user_data (@sqe_addr): try the async
 * work queue first, then pending timeouts, then pending poll requests.
 *
 * The result (0, -ENOENT, or -EALREADY) is posted as a CQE on @req, the
 * request's reference is dropped, and any linked next request is handed
 * back through @nxt.  On error the link chain is marked failed.
 *
 * NOTE(review): completion_lock is taken on two distinct paths — directly
 * after a successful io_async_cancel_one(), or before falling through to
 * the timeout/poll scans (which require it held) — both converge on
 * "done" with the lock held.  irqsave is used, presumably because this is
 * reachable from hrtimer (irq) context via io_link_timeout_fn() — verify
 * against callers.
 */
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
				     struct io_kiocb *req, __u64 sqe_addr,
				     struct io_kiocb **nxt)
{
	unsigned long flags;
	int ret;

	/* io_async_cancel_one() takes the identifier as an opaque pointer */
	ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
	if (ret != -ENOENT) {
		spin_lock_irqsave(&ctx->completion_lock, flags);
		goto done;
	}

	/* the list-based cancel helpers below must run under the lock */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr);
done:
	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_put_req_find_next(req, nxt);
}
static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt) struct io_kiocb **nxt)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
void *sqe_addr;
int ret;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
...@@ -2387,13 +2417,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -2387,13 +2417,7 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe->cancel_flags) sqe->cancel_flags)
return -EINVAL; return -EINVAL;
sqe_addr = (void *) (unsigned long) READ_ONCE(sqe->addr); io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
ret = io_async_cancel_one(ctx, sqe_addr);
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0; return 0;
} }
...@@ -2655,7 +2679,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) ...@@ -2655,7 +2679,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *prev = NULL; struct io_kiocb *prev = NULL;
unsigned long flags; unsigned long flags;
int ret = -ETIME;
spin_lock_irqsave(&ctx->completion_lock, flags); spin_lock_irqsave(&ctx->completion_lock, flags);
...@@ -2671,12 +2694,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) ...@@ -2671,12 +2694,11 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) { if (prev) {
void *user_data = (void *) (unsigned long) prev->user_data; io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
ret = io_async_cancel_one(ctx, user_data); } else {
} io_cqring_add_event(req, -ETIME);
io_cqring_add_event(req, ret);
io_put_req(req); io_put_req(req);
}
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册