Commit 7a743e22 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: get next work with submission ref drop

If req->refs == 1 after dropping the submission reference, the request
is done: the remaining reference belongs to io_put_work() and will be
dropped synchronously shortly after the handler returns. In this case
it's safe to steal the next work from the request.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 014db007
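
The commit message's argument can be made concrete with a small example. Below is a minimal user-space sketch of the invariant, not the kernel implementation: C11 atomics stand in for the kernel's refcount_t, and the names (fake_req, handler_done) are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the refcounted request (struct io_kiocb). */
struct fake_req {
        atomic_int refs;
};

/* Models the tail of an io-wq handler, like io_put_req_async_completion(). */
static void handler_done(struct fake_req *req)
{
        /* Drop the submission reference. */
        atomic_fetch_sub_explicit(&req->refs, 1, memory_order_acq_rel);

        /*
         * If exactly one reference remains, it is the one io_put_work()
         * drops synchronously after this handler returns, so no other
         * context can still touch the request and it is safe to steal
         * its next linked work.
         */
        if (atomic_load_explicit(&req->refs, memory_order_acquire) == 1)
                printf("refs == 1: safe to steal the next work\n");
}

int main(void)
{
        /* One submission reference plus the io_put_work() reference. */
        struct fake_req req = { .refs = 2 };

        handler_done(&req);     /* prints the "safe to steal" message */
        return 0;
}

The decrement-then-read pair mirrors refcount_dec()/refcount_read() in the patch below; it is race-free only because the reference held for io_put_work() guarantees the request cannot be freed between the two calls.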
@@ -1518,6 +1518,27 @@ static void io_free_req(struct io_kiocb *req)
 		io_queue_async_work(nxt);
 }
 
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+	struct io_wq_work *work = *workptr;
+	struct io_kiocb *link = work->data;
+
+	io_queue_linked_timeout(link);
+	io_wq_submit_work(workptr);
+}
+
+static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+{
+	struct io_kiocb *link;
+
+	*workptr = &nxt->work;
+	link = io_prep_linked_timeout(nxt);
+	if (link) {
+		nxt->work.func = io_link_work_cb;
+		nxt->work.data = link;
+	}
+}
+
 /*
  * Drop reference to request, return next in chain (if there is one) if this
  * was the last reference to this request.
@@ -1537,6 +1558,27 @@ static void io_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
+static void io_put_req_async_completion(struct io_kiocb *req,
+					struct io_wq_work **workptr)
+{
+	/*
+	 * It's in an io-wq worker, so there should always be at least
+	 * one reference, which will be dropped in io_put_work() just
+	 * after the current handler returns.
+	 *
+	 * It also means that if the counter dropped to 1, there are no
+	 * asynchronous users left, so it's safe to steal the next work.
+	 */
+	refcount_dec(&req->refs);
+	if (refcount_read(&req->refs) == 1) {
+		struct io_kiocb *nxt = NULL;
+
+		io_req_find_next(req, &nxt);
+		if (nxt)
+			io_wq_assign_next(workptr, nxt);
+	}
+}
+
 /*
  * Must only be used if we don't need to care about links, usually from
  * within the completion handling itself.
@@ -2543,27 +2585,6 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	return false;
 }
 
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
-	struct io_wq_work *work = *workptr;
-	struct io_kiocb *link = work->data;
-
-	io_queue_linked_timeout(link);
-	io_wq_submit_work(workptr);
-}
-
-static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
-{
-	struct io_kiocb *link;
-
-	*workptr = &nxt->work;
-	link = io_prep_linked_timeout(nxt);
-	if (link) {
-		nxt->work.func = io_link_work_cb;
-		nxt->work.data = link;
-	}
-}
-
 static void __io_fsync(struct io_kiocb *req)
 {
 	loff_t end = req->sync.off + req->sync.len;
@@ -2581,14 +2602,11 @@ static void __io_fsync(struct io_kiocb *req)
 static void io_fsync_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-	struct io_kiocb *nxt = NULL;
 
 	if (io_req_cancelled(req))
 		return;
 	__io_fsync(req);
-	io_put_req(req); /* drop submission reference */
-	if (nxt)
-		io_wq_assign_next(workptr, nxt);
+	io_put_req_async_completion(req, workptr);
 }
 
 static int io_fsync(struct io_kiocb *req, bool force_nonblock)
@@ -2617,14 +2635,11 @@ static void __io_fallocate(struct io_kiocb *req)
 static void io_fallocate_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-	struct io_kiocb *nxt = NULL;
 
 	if (io_req_cancelled(req))
 		return;
 	__io_fallocate(req);
-	io_put_req(req); /* drop submission reference */
-	if (nxt)
-		io_wq_assign_next(workptr, nxt);
+	io_put_req_async_completion(req, workptr);
 }
 
 static int io_fallocate_prep(struct io_kiocb *req,
@@ -2988,13 +3003,10 @@ static void __io_close_finish(struct io_kiocb *req)
 static void io_close_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-	struct io_kiocb *nxt = NULL;
 
 	/* not cancellable, don't do io_req_cancelled() */
 	__io_close_finish(req);
-	io_put_req(req); /* drop submission reference */
-	if (nxt)
-		io_wq_assign_next(workptr, nxt);
+	io_put_req_async_completion(req, workptr);
 }
 
 static int io_close(struct io_kiocb *req, bool force_nonblock)
@@ -3436,14 +3448,11 @@ static int __io_accept(struct io_kiocb *req, bool force_nonblock)
 static void io_accept_finish(struct io_wq_work **workptr)
 {
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-	struct io_kiocb *nxt = NULL;
 
 	if (io_req_cancelled(req))
 		return;
 	__io_accept(req, false);
-	io_put_req(req); /* drop submission reference */
-	if (nxt)
-		io_wq_assign_next(workptr, nxt);
+	io_put_req_async_completion(req, workptr);
 }
 #endif
@@ -4682,7 +4691,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-	struct io_kiocb *nxt = NULL;
 	int ret = 0;
 
 	/* if NO_CANCEL is set, we must still run the work */
@@ -4711,9 +4719,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		io_put_req(req);
 	}
 
-	io_put_req(req); /* drop submission reference */
-	if (nxt)
-		io_wq_assign_next(workptr, nxt);
+	io_put_req_async_completion(req, workptr);
 }
 
 static int io_req_needs_file(struct io_kiocb *req, int fd)
@@ -6103,6 +6109,7 @@ static void io_put_work(struct io_wq_work *work)
 {
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 
+	/* Consider that io_put_req_async_completion() relies on this ref */
 	io_put_req(req);
 }