Commit f4db7182 authored by Pavel Begunkov, committed by Jens Axboe

io-wq: return next work from ->do_work() directly

It's easier to return the next work item from ->do_work() than
to pass it back through an in-out argument. Looks nicer and
simplifies the callers. Also, merge io_wq_assign_next() into its
only user.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent e883a79d
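The crux of the patch is the ->do_work() calling convention. A minimal userspace sketch of the two conventions (hypothetical names, not the kernel's): with an in-out argument, "no follow-up work" can only be detected by saving the old pointer and comparing it after the call, while a plain return value lets NULL carry that meaning directly.

#include <stddef.h>

struct work { struct work *next; };

/* Old convention: in-out argument.  The callee signals "no next work"
 * by leaving *wpp untouched, so every caller must save the old pointer
 * and compare it afterwards. */
static void do_work_inout(struct work **wpp)
{
	if ((*wpp)->next)
		*wpp = (*wpp)->next;
}

/* New convention: the next work item is returned directly, and NULL
 * unambiguously means "nothing more to do". */
static struct work *do_work_ret(struct work *w)
{
	return w->next;
}

Callers of the old style needed the old_work == linked comparison dance visible in the hunks below; callers of the new style just test the return value.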
@@ -523,9 +523,8 @@ static void io_worker_handle_work(struct io_worker *worker)
 			work->flags |= IO_WQ_WORK_CANCEL;
 
 		hash = io_get_work_hash(work);
-		linked = old_work = work;
-		wq->do_work(&linked);
-		linked = (old_work == linked) ? NULL : linked;
+		old_work = work;
+		linked = wq->do_work(work);
 
 		work = next_hashed;
 		if (!work && linked && !io_wq_is_hashed(linked)) {
@@ -781,8 +780,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
 		struct io_wq_work *old_work = work;
 
 		work->flags |= IO_WQ_WORK_CANCEL;
-		wq->do_work(&work);
-		work = (work == old_work) ? NULL : work;
+		work = wq->do_work(work);
 		wq->free_work(old_work);
 	} while (work);
 }
...
@@ -101,7 +101,7 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
 }
 
 typedef void (free_work_fn)(struct io_wq_work *);
-typedef void (io_wq_work_fn)(struct io_wq_work **);
+typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);
 
 struct io_wq_data {
 	struct user_struct *user;
...
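For context, the new typedef is consumed through struct io_wq_data when io_uring sets up its worker pool. A sketch of that wiring, assuming the io_init_wq_offload() shape of this kernel version (field names follow the struct above; the call-site details are an assumption, not part of this diff):

	struct io_wq_data data = {
		.user		= ctx->user,
		.free_work	= io_free_work,
		.do_work	= io_wq_submit_work,	/* now returns struct io_wq_work * */
	};

	ctx->io_wq = io_wq_create(concurrency, &data);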
@@ -895,7 +895,6 @@ enum io_mem_account {
 	ACCT_PINNED,
 };
 
-static void io_wq_submit_work(struct io_wq_work **workptr);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
 static void io_double_put_req(struct io_kiocb *req);
@@ -1773,20 +1772,6 @@ static void io_free_req(struct io_kiocb *req)
 	}
 }
 
-static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
-{
-	struct io_kiocb *link;
-	const struct io_op_def *def = &io_op_defs[nxt->opcode];
-
-	if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
-		io_wq_hash_work(&nxt->work, file_inode(nxt->file));
-
-	*workptr = &nxt->work;
-	link = io_prep_linked_timeout(nxt);
-	if (link)
-		nxt->flags |= REQ_F_QUEUE_TIMEOUT;
-}
-
 /*
  * Drop reference to request, return next in chain (if there is one) if this
  * was the last reference to this request.
@@ -1806,24 +1791,29 @@ static void io_put_req(struct io_kiocb *req)
 		io_free_req(req);
 }
 
-static void io_steal_work(struct io_kiocb *req,
-			  struct io_wq_work **workptr)
+static struct io_wq_work *io_steal_work(struct io_kiocb *req)
 {
+	struct io_kiocb *link, *nxt = NULL;
+
 	/*
-	 * It's in an io-wq worker, so there always should be at least
-	 * one reference, which will be dropped in io_put_work() just
-	 * after the current handler returns.
-	 *
-	 * It also means, that if the counter dropped to 1, then there is
-	 * no asynchronous users left, so it's safe to steal the next work.
+	 * A ref is owned by io-wq in which context we're. So, if that's the
+	 * last one, it's safe to steal next work. False negatives are Ok,
+	 * it just will be re-punted async in io_put_work()
 	 */
-	if (refcount_read(&req->refs) == 1) {
-		struct io_kiocb *nxt = NULL;
-
-		io_req_find_next(req, &nxt);
-		if (nxt)
-			io_wq_assign_next(workptr, nxt);
-	}
+	if (refcount_read(&req->refs) != 1)
+		return NULL;
+
+	io_req_find_next(req, &nxt);
+	if (!nxt)
+		return NULL;
+
+	if ((nxt->flags & REQ_F_ISREG) && io_op_defs[nxt->opcode].hash_reg_file)
+		io_wq_hash_work(&nxt->work, file_inode(nxt->file));
+
+	link = io_prep_linked_timeout(nxt);
+	if (link)
+		nxt->flags |= REQ_F_QUEUE_TIMEOUT;
+	return &nxt->work;
 }
 
 /*
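The refcount_read() test in io_steal_work() above is deliberately opportunistic: seeing a count of exactly 1 proves the worker owns the last reference, so grabbing the next request cannot race with a concurrent completion, while seeing anything else simply declines and the work gets re-punted asynchronously via io_put_work() later. A tiny model of this "false negatives are fine" check using C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

struct request { atomic_int refs; };

/* Steal follow-up work only when we provably hold the last reference;
 * declining under a transient race is always safe, merely slower. */
static bool can_steal_next(struct request *req)
{
	return atomic_load(&req->refs) == 1;
}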
@@ -5718,9 +5708,8 @@ static void io_arm_async_linked_timeout(struct io_kiocb *req)
 		io_queue_linked_timeout(link);
 }
 
-static void io_wq_submit_work(struct io_wq_work **workptr)
+static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
 {
-	struct io_wq_work *work = *workptr;
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
 	int ret = 0;
 
@@ -5751,7 +5740,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		io_req_complete(req, ret);
 	}
 
-	io_steal_work(req, workptr);
+	return io_steal_work(req);
 }
 
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
...
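Putting it together, the contract after this patch is: a work handler processes one item and returns follow-up work or NULL, and the caller drains the chain, freeing each item after it has run. A self-contained toy model of that loop (names are illustrative only, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct toy_work {
	const char *name;
	struct toy_work *next;	/* linked follow-up work, if any */
};

/* Handler in the post-patch style: run one item, return its successor. */
static struct toy_work *toy_do_work(struct toy_work *w)
{
	printf("processing %s\n", w->name);
	return w->next;		/* NULL terminates the chain */
}

static void toy_free_work(struct toy_work *w)
{
	printf("freeing    %s\n", w->name);
}

int main(void)
{
	struct toy_work c = { "c", NULL };
	struct toy_work b = { "b", &c };
	struct toy_work a = { "a", &b };
	struct toy_work *work = &a;

	/* Same shape as the reworked io_run_cancel(): remember the item
	 * being processed before it is overwritten with the handler's
	 * return value, so the right item gets freed. */
	do {
		struct toy_work *old_work = work;

		work = toy_do_work(work);
		toy_free_work(old_work);
	} while (work);

	return 0;
}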