commit 7a612350 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: fix complete_post races for linked req

Calling io_queue_next() after spin_unlock in io_req_complete_post()
races with the other side extracting and reusing this request. Hand-code
the relevant parts of io_req_find_next() instead, since io_disarm_next()
and io_req_task_queue() must be (and are safe to be) called with
completion_lock held.
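
To make the race concrete, here is a minimal userspace sketch, not kernel
code: the names (fake_req, free_list, complete_post_buggy, and so on) are
illustrative stand-ins, and pthread mutexes stand in for completion_lock.
It shows why dereferencing the request after the unlock is a
use-after-reuse, and why handling the link under the lock closes the
window:

#include <pthread.h>
#include <stdio.h>

/* Toy request: only the link pointer matters for the illustration. */
struct fake_req {
	struct fake_req *link;	/* next request in a link chain */
};

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_req *free_list;	/* stand-in for the locked free list */

/* Shape of the completion path BEFORE the fix. */
static void complete_post_buggy(struct fake_req *req)
{
	pthread_mutex_lock(&completion_lock);
	free_list = req;	/* req is now grabbable by other threads */
	pthread_mutex_unlock(&completion_lock);

	/*
	 * RACE WINDOW: another thread may pop req off free_list and
	 * reinitialise it before this dereference runs.
	 */
	if (req->link)
		printf("queue linked request (unsafe)\n");
}

/* Shape of the completion path AFTER the fix: link handled under the lock. */
static void complete_post_fixed(struct fake_req *req)
{
	pthread_mutex_lock(&completion_lock);
	if (req->link) {	/* safe: req cannot be reused while locked */
		printf("queue linked request (safe)\n");
		req->link = NULL;
	}
	free_list = req;
	pthread_mutex_unlock(&completion_lock);
}

int main(void)
{
	struct fake_req next = { .link = NULL };
	struct fake_req req = { .link = &next };

	complete_post_fixed(&req);
	req.link = &next;
	complete_post_buggy(&req);
	printf("free_list holds %p\n", (void *)free_list);
	return 0;
}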

io_req_complete_post() already does io_commit_cqring() and
io_cqring_ev_posted(), so just reuse them to also cover the CQEs posted
by io_disarm_next().
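
The ordering this buys is sketched by the runnable stub program below;
fill_cqe, commit_cqring, and cqring_ev_posted are logging stand-ins
assumed for illustration, not the real kernel helpers. CQEs for the
request and for anything io_disarm_next() cancels are filled under the
lock, a single commit before unlock publishes them all, and one wakeup
follows:

#include <stdbool.h>
#include <stdio.h>

/* Logging stubs standing in for the real io_uring helpers. */
static void fill_cqe(const char *who) { printf("fill CQE for %s\n", who); }
static void commit_cqring(void) { printf("commit CQ ring tail\n"); }
static void cqring_ev_posted(void) { printf("wake CQ waiters once\n"); }

int main(void)
{
	bool last_ref = true;	/* pretend we drop the last reference */

	/* spin_lock_irqsave(&ctx->completion_lock, flags); */
	fill_cqe("req");
	if (last_ref) {
		/* io_disarm_next() may post CQEs, e.g. for a link timeout */
		fill_cqe("disarmed link timeout");
		/* io_req_task_queue(req->link) is safe under the lock */
	}
	commit_cqring();	/* moved before unlock: covers all CQEs above */
	/* spin_unlock_irqrestore(&ctx->completion_lock, flags); */
	cqring_ev_posted();	/* a single wakeup serves every posted CQE */
	return 0;
}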
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5672a62f3150ee7c55849f40c0037655c4f2840f.1615250156.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 33cc89a9
@@ -985,6 +985,7 @@ static const struct io_op_def io_op_defs[] = {
 	[IORING_OP_UNLINKAT] = {},
 };
 
+static bool io_disarm_next(struct io_kiocb *req);
 static void io_uring_del_task_file(unsigned long index);
 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 					 struct task_struct *task,
@@ -1525,15 +1526,14 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
 	__io_cqring_fill_event(req, res, 0);
 }
 
-static inline void io_req_complete_post(struct io_kiocb *req, long res,
-					unsigned int cflags)
+static void io_req_complete_post(struct io_kiocb *req, long res,
+				 unsigned int cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
 	__io_cqring_fill_event(req, res, cflags);
-	io_commit_cqring(ctx);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -1541,19 +1541,26 @@ static inline void io_req_complete_post(struct io_kiocb *req, long res,
 	if (refcount_dec_and_test(&req->refs)) {
 		struct io_comp_state *cs = &ctx->submit_state.comp;
 
+		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
+				io_disarm_next(req);
+			if (req->link) {
+				io_req_task_queue(req->link);
+				req->link = NULL;
+			}
+		}
 		io_dismantle_req(req);
 		io_put_task(req->task, 1);
 		list_add(&req->compl.list, &cs->locked_free_list);
 		cs->locked_free_nr++;
 	} else
 		req = NULL;
+	io_commit_cqring(ctx);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 	io_cqring_ev_posted(ctx);
 
-	if (req) {
-		io_queue_next(req);
+	if (req)
 		percpu_ref_put(&ctx->refs);
-	}
 }
 
 static void io_req_complete_state(struct io_kiocb *req, long res,
...