Commit 8a438665 authored by Pavel Begunkov, committed by Xiaoguang Wang

io_uring: make submission ref putting consistent

to #28170604

commit 594506fec5faec2b1ec82ad6fb0c8132512fc459 upstream

The rule is simple: any async handler gets a submission ref and should
put it at the end. Make them all follow it, and so be more consistent.

This is a preparation patch; since io_wq_assign_next() currently won't
ever work, it doesn't bother to use io_put_req_find_next() instead of
io_put_req().
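
Concretely, every async *_finish handler converges on the shape sketched
below. io_example_finish and __io_example are placeholder names for
illustration; io_req_cancelled(), io_put_req(), io_wq_assign_next() and
struct io_kiocb are the real symbols touched by this diff:

static void io_example_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	if (io_req_cancelled(req))	/* cancellation drops both refs */
		return;
	__io_example(req, &nxt);	/* the actual blocking work */
	io_put_req(req);		/* drop submission reference */
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}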
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

refcount_inc_not_zero() -> refcount_inc() fix.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
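
The refcount_inc_not_zero() -> refcount_inc() fix mentioned above shows
up in the io_close() hunk below: close punts to async while the caller
still holds the submission reference, so the refcount cannot be zero and
a plain increment is sufficient. Excerpted from the diff:

	/* submission ref will be dropped, take it for async */
	refcount_inc(&req->refs);
	req->work.func = io_close_finish;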
Parent bc45a367
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2555,7 +2555,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	if (req->work.flags & IO_WQ_WORK_CANCEL) {
 		req_set_fail_links(req);
 		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
+		io_double_put_req(req);
 		return true;
 	}
 
@@ -2605,6 +2605,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_fsync(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2614,7 +2615,6 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* fsync always requires a blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_fsync_finish;
 		return -EAGAIN;
 	}
@@ -2626,9 +2626,6 @@ static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
 {
 	int ret;
 
-	if (io_req_cancelled(req))
-		return;
-
 	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
 				req->sync.len);
 	if (ret < 0)
@@ -2642,7 +2639,10 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
+	if (io_req_cancelled(req))
+		return;
 	__io_fallocate(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2664,7 +2664,6 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* fallocate always requiring blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_fallocate_finish;
 		return -EAGAIN;
 	}
@@ -3022,6 +3021,7 @@ static void io_close_finish(struct io_wq_work **workptr)
 
 	/* not cancellable, don't do io_req_cancelled() */
 	__io_close_finish(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -3038,6 +3038,9 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 
 	/* if the file has a flush method, be safe and punt to async */
 	if (req->close.put_file->f_op->flush && force_nonblock) {
+		/* submission ref will be dropped, take it for async */
+		refcount_inc(&req->refs);
+
 		req->work.func = io_close_finish;
 		/*
 		 * Do manual async queue here to avoid grabbing files - we don't
@@ -3095,6 +3098,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_sync_file_range(req, &nxt);
+	io_put_req(req); /* put submission ref */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -3104,7 +3108,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* sync_file_range always requires a blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_sync_file_range_finish;
 		return -EAGAIN;
 	}
@@ -3473,11 +3476,10 @@ static void io_accept_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
-	io_put_req(req);
-
 	if (io_req_cancelled(req))
 		return;
 	__io_accept(req, &nxt, false);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -4746,17 +4748,14 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		} while (1);
 	}
 
-	/* drop submission reference */
-	io_put_req(req);
-
 	if (ret) {
 		req_set_fail_links(req);
 		io_cqring_add_event(req, ret);
 		io_put_req(req);
 	}
 
-	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt)
+	io_put_req(req); /* drop submission reference */
+	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
 
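
For context, the io_double_put_req() used by io_req_cancelled() in the
first hunk drops the completion and submission references in one step.
Its body is not part of this diff; a sketch consistent with the
fs/io_uring.c of this era would be:

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both the submission and the completion reference */
	if (refcount_sub_and_test(2, &req->refs))
		__io_free_req(req);
}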