diff --git a/fs/io-wq.c b/fs/io-wq.c
index 253c04a40db57f35e4bcf25b8c4193c1755851ac..652b8bac2dbcad17772e1f4047baaf38c4167a09 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -639,6 +639,91 @@ void io_wq_cancel_all(struct io_wq *wq)
 	rcu_read_unlock();
 }
 
+struct io_cb_cancel_data {
+	struct io_wqe *wqe;
+	work_cancel_fn *cancel;
+	void *caller_data;
+};
+
+static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
+{
+	struct io_cb_cancel_data *data = cancel_data;
+	struct io_wqe *wqe = data->wqe;
+	bool ret = false;
+
+	/*
+	 * Hold the lock to avoid ->cur_work going out of scope, caller
+	 * may dereference the passed in work.
+	 */
+	spin_lock_irq(&wqe->lock);
+	if (worker->cur_work &&
+	    data->cancel(worker->cur_work, data->caller_data)) {
+		send_sig(SIGINT, worker->task, 1);
+		ret = true;
+	}
+	spin_unlock_irq(&wqe->lock);
+
+	return ret;
+}
+
+static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
+					       work_cancel_fn *cancel,
+					       void *cancel_data)
+{
+	struct io_cb_cancel_data data = {
+		.wqe = wqe,
+		.cancel = cancel,
+		.caller_data = cancel_data,
+	};
+	struct io_wq_work *work;
+	bool found = false;
+
+	spin_lock_irq(&wqe->lock);
+	list_for_each_entry(work, &wqe->work_list, list) {
+		if (cancel(work, cancel_data)) {
+			list_del(&work->list);
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irq(&wqe->lock);
+
+	if (found) {
+		work->flags |= IO_WQ_WORK_CANCEL;
+		work->func(&work);
+		return IO_WQ_CANCEL_OK;
+	}
+
+	rcu_read_lock();
+	found = io_wq_for_each_worker(wqe, &wqe->free_list, io_work_cancel,
+				      &data);
+	if (found)
+		goto done;
+
+	found = io_wq_for_each_worker(wqe, &wqe->busy_list, io_work_cancel,
+				      &data);
+done:
+	rcu_read_unlock();
+	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+}
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+				  void *data)
+{
+	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+	int i;
+
+	for (i = 0; i < wq->nr_wqes; i++) {
+		struct io_wqe *wqe = wq->wqes[i];
+
+		ret = io_wqe_cancel_cb_work(wqe, cancel, data);
+		if (ret != IO_WQ_CANCEL_NOTFOUND)
+			break;
+	}
+
+	return ret;
+}
+
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_wq_work *work = data;
diff --git a/fs/io-wq.h b/fs/io-wq.h
index e93f764b1fa4ee23fd9e49d0b78507765788b425..3de192dc73fc73e5a3dc4c74d7aae2686b81038e 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -43,6 +43,11 @@ void io_wq_flush(struct io_wq *wq);
 void io_wq_cancel_all(struct io_wq *wq);
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
 
+typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
+
+enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+				  void *data);
+
 #if defined(CONFIG_IO_WQ)
 extern void io_wq_worker_sleeping(struct task_struct *);
 extern void io_wq_worker_running(struct task_struct *);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 72d260520c8fddac6bff60af832b50c9883254e2..76d653085987de672dfa16bfe1ab5ffaa288499b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2133,6 +2133,48 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+	return req->user_data == (unsigned long) data;
+}
+
+static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+			   struct io_kiocb **nxt)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+	enum io_wq_cancel cancel_ret;
+	void *sqe_addr;
+	int ret = 0;
+
+	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+		return -EINVAL;
+	if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
+	    sqe->cancel_flags)
+		return -EINVAL;
+
+	sqe_addr = (void *) (unsigned long) READ_ONCE(sqe->addr);
+	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+	switch (cancel_ret) {
+	case IO_WQ_CANCEL_OK:
+		ret = 0;
+		break;
+	case IO_WQ_CANCEL_RUNNING:
+		ret = -EALREADY;
+		break;
+	case IO_WQ_CANCEL_NOTFOUND:
+		ret = -ENOENT;
+		break;
+	}
+
+	if (ret < 0 && (req->flags & REQ_F_LINK))
+		req->flags |= REQ_F_FAIL_LINK;
+	io_cqring_add_event(req->ctx, sqe->user_data, ret);
+	io_put_req(req, nxt);
+	return 0;
+}
+
 static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			const struct io_uring_sqe *sqe)
 {
@@ -2217,6 +2259,9 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	case IORING_OP_ACCEPT:
 		ret = io_accept(req, s->sqe, nxt, force_nonblock);
 		break;
+	case IORING_OP_ASYNC_CANCEL:
+		ret = io_async_cancel(req, s->sqe, nxt);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index f82d90e617a6fbc03ba988cec9134bc289bd75f4..6877cf8894db446dca86cf3e72b3815010770f11 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -33,6 +33,7 @@ struct io_uring_sqe {
 		__u32		msg_flags;
 		__u32		timeout_flags;
 		__u32		accept_flags;
+		__u32		cancel_flags;
 	};
 	__u64	user_data;	/* data to be passed back at completion time */
 	union {
@@ -70,6 +71,7 @@ struct io_uring_sqe {
 #define IORING_OP_TIMEOUT	11
 #define IORING_OP_TIMEOUT_REMOVE	12
 #define IORING_OP_ACCEPT	13
+#define IORING_OP_ASYNC_CANCEL	14
 
 /*
  * sqe->fsync_flags
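For reference, a minimal userspace sketch of how the new opcode might be driven (not part of the patch; the helper name below is made up). The kernel-side io_async_cancel() requires sqe->flags, ioprio, off, len and cancel_flags to be zero, matches sqe->addr against the target request's user_data via io_cancel_cb(), and posts the result in the cancel request's own CQE:

#include <string.h>
#include <linux/io_uring.h>

/*
 * Hypothetical helper: fill an SQE that asks the kernel to cancel the
 * earlier submission identified by 'target_user_data'. Only the field
 * usage mirrors io_async_cancel() above; everything else is illustrative.
 */
static void prep_async_cancel(struct io_uring_sqe *sqe, __u64 target_user_data,
			      __u64 my_user_data)
{
	memset(sqe, 0, sizeof(*sqe));	/* flags/ioprio/off/len/cancel_flags must be 0 */
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->addr = target_user_data;	/* compared against req->user_data by io_cancel_cb() */
	sqe->user_data = my_user_data;	/* tags the CQE carrying the cancel result */
}

The completion for the cancel request then reports whether the target was still queued and got cancelled (res == 0), was already running and was signalled (-EALREADY), or was not found at all (-ENOENT).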