Commit bd9d5dcb authored by Jackie Liu, committed by Shile Zhang

io_uring: remove passed in 'ctx' function parameter ctx if possible

commit a197f664a0db8a6219d9ce949f5f29b89f60fb2b upstream.

In many of these functions the request (req) is the central object, and
req->ctx is already set when the request is initialized, so there is no
need to pass ctx in from the caller.

Cleanup, no functional change.
Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent 4cc84a2b
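
Before the diff, a minimal standalone C sketch of the pattern being applied (illustrative code only, not the kernel source; the names ring_ctx, request and sequence_defer_* are hypothetical stand-ins): a helper that used to take both the ring context and the request now takes only the request and recovers the context through the req->ctx back-pointer.

#include <stdio.h>

struct ring_ctx { int cached_tail; };                     /* stand-in for struct io_ring_ctx */
struct request  { int sequence; struct ring_ctx *ctx; };  /* stand-in for struct io_kiocb    */

/* Before: the caller has to pass both ctx and req. */
static int sequence_defer_old(struct ring_ctx *ctx, struct request *req)
{
	return req->sequence != ctx->cached_tail;
}

/* After: ctx is derived from the request itself, so the extra parameter goes away. */
static int sequence_defer_new(struct request *req)
{
	struct ring_ctx *ctx = req->ctx;

	return req->sequence != ctx->cached_tail;
}

int main(void)
{
	struct ring_ctx ctx = { .cached_tail = 1 };
	struct request req = { .sequence = 2, .ctx = &ctx };

	/* Both forms compute the same result; only the call sites shrink. */
	printf("%d %d\n", sequence_defer_old(&ctx, &req), sequence_defer_new(&req));
	return 0;
}

Every hunk below is this same transformation applied to the real io_uring helpers, with no change in behavior.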
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -437,20 +437,20 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return NULL;
 }
 
-static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
-				       struct io_kiocb *req)
+static inline bool __io_sequence_defer(struct io_kiocb *req)
 {
+	struct io_ring_ctx *ctx = req->ctx;
+
 	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
 					+ atomic_read(&ctx->cached_cq_overflow);
 }
 
-static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
-				     struct io_kiocb *req)
+static inline bool io_sequence_defer(struct io_kiocb *req)
 {
 	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
 		return false;
 
-	return __io_sequence_defer(ctx, req);
+	return __io_sequence_defer(req);
 }
 
 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -458,7 +458,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
-	if (req && !io_sequence_defer(ctx, req)) {
+	if (req && !io_sequence_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -471,7 +471,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
-	if (req && !__io_sequence_defer(ctx, req)) {
+	if (req && !__io_sequence_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -534,10 +534,10 @@ static inline bool io_prep_async_work(struct io_kiocb *req)
 	return do_hashed;
 }
 
-static inline void io_queue_async_work(struct io_ring_ctx *ctx,
-				       struct io_kiocb *req)
+static inline void io_queue_async_work(struct io_kiocb *req)
 {
 	bool do_hashed = io_prep_async_work(req);
+	struct io_ring_ctx *ctx = req->ctx;
 
 	trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
 					req->flags);
@@ -588,7 +588,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 			continue;
 		}
 		req->flags |= REQ_F_IO_DRAINED;
-		io_queue_async_work(ctx, req);
+		io_queue_async_work(req);
 	}
 }
 
@@ -791,9 +791,9 @@ static void __io_free_req(struct io_kiocb *req)
 	kmem_cache_free(req_cachep, req);
 }
 
-static bool io_link_cancel_timeout(struct io_ring_ctx *ctx,
-				   struct io_kiocb *req)
+static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
+	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	ret = hrtimer_try_to_cancel(&req->timeout.timer);
@@ -833,7 +833,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 		 * in this context instead of having to queue up new async work.
 		 */
 		if (req->flags & REQ_F_LINK_TIMEOUT) {
-			wake_ev = io_link_cancel_timeout(ctx, nxt);
+			wake_ev = io_link_cancel_timeout(nxt);
 
 			/* we dropped this link, get next */
 			nxt = list_first_entry_or_null(&req->link_list,
@@ -842,7 +842,7 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
 			*nxtptr = nxt;
 			break;
 		} else {
-			io_queue_async_work(req->ctx, nxt);
+			io_queue_async_work(nxt);
 			break;
 		}
 	}
@@ -870,7 +870,7 @@ static void io_fail_links(struct io_kiocb *req)
 
 		if ((req->flags & REQ_F_LINK_TIMEOUT) &&
 		    link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
-			io_link_cancel_timeout(ctx, link);
+			io_link_cancel_timeout(link);
 		} else {
 			io_cqring_fill_event(link, -ECANCELED);
 			io_double_put_req(link);
@@ -939,7 +939,7 @@ static void io_put_req(struct io_kiocb *req, struct io_kiocb **nxtptr)
 		if (nxtptr)
 			*nxtptr = nxt;
 		else
-			io_queue_async_work(nxt->ctx, nxt);
+			io_queue_async_work(nxt);
 	}
 }
 
@@ -1899,7 +1899,7 @@ static void io_poll_remove_one(struct io_kiocb *req)
 	WRITE_ONCE(poll->canceled, true);
 	if (!list_empty(&poll->wait.entry)) {
 		list_del_init(&poll->wait.entry);
-		io_queue_async_work(req->ctx, req);
+		io_queue_async_work(req);
 	}
 	spin_unlock(&poll->head->lock);
 
@@ -1951,9 +1951,10 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			     __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
 {
+	struct io_ring_ctx *ctx = req->ctx;
+
 	req->poll.done = true;
 	io_cqring_fill_event(req, mangle_poll(mask));
 	io_commit_cqring(ctx);
@@ -1989,7 +1990,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 		return;
 	}
 	list_del_init(&req->list);
-	io_poll_complete(ctx, req, mask);
+	io_poll_complete(req, mask);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_ev_posted(ctx);
@@ -2017,13 +2018,13 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	 */
 	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
 		list_del(&req->list);
-		io_poll_complete(ctx, req, mask);
+		io_poll_complete(req, mask);
 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 		io_cqring_ev_posted(ctx);
 		io_put_req(req, NULL);
 	} else {
-		io_queue_async_work(ctx, req);
+		io_queue_async_work(req);
 	}
 
 	return 1;
@@ -2108,7 +2109,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	}
 	if (mask) { /* no async, we'd stolen it */
 		ipt.error = 0;
-		io_poll_complete(ctx, req, mask);
+		io_poll_complete(req, mask);
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
@@ -2355,12 +2356,13 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req)
 {
 	const struct io_uring_sqe *sqe = req->submit.sqe;
 	struct io_uring_sqe *sqe_copy;
+	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
+	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
 		return 0;
 
 	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -2368,7 +2370,7 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
 		return -EAGAIN;
 
 	spin_lock_irq(&ctx->completion_lock);
-	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
+	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		kfree(sqe_copy);
 		return 0;
@@ -2383,11 +2385,12 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
 	return -EIOCBQUEUED;
 }
 
-static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			   struct io_kiocb **nxt, bool force_nonblock)
+static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
+			   bool force_nonblock)
 {
 	int ret, opcode;
 	struct sqe_submit *s = &req->submit;
+	struct io_ring_ctx *ctx = req->ctx;
 
 	opcode = READ_ONCE(s->sqe->opcode);
 	switch (opcode) {
@@ -2467,7 +2470,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 {
 	struct io_wq_work *work = *workptr;
 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-	struct io_ring_ctx *ctx = req->ctx;
 	struct sqe_submit *s = &req->submit;
 	const struct io_uring_sqe *sqe = s->sqe;
 	struct io_kiocb *nxt = NULL;
@@ -2483,7 +2485,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
 		s->in_async = true;
 		do {
-			ret = __io_submit_sqe(ctx, req, &nxt, false);
+			ret = __io_submit_sqe(req, &nxt, false);
 			/*
 			 * We can get EAGAIN for polled IO even though we're
 			 * forcing a sync submission from here, since we can't
@@ -2537,10 +2539,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
 	return table->files[index & IORING_FILE_TABLE_MASK];
 }
 
-static int io_req_set_file(struct io_ring_ctx *ctx,
-			   struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
 {
 	struct sqe_submit *s = &req->submit;
+	struct io_ring_ctx *ctx = req->ctx;
 	unsigned flags;
 	int fd;
 
@@ -2580,9 +2582,10 @@ static int io_req_set_file(struct io_ring_ctx *ctx,
 	return 0;
 }
 
-static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int io_grab_files(struct io_kiocb *req)
 {
 	int ret = -EBADF;
+	struct io_ring_ctx *ctx = req->ctx;
 
 	rcu_read_lock();
 	spin_lock_irq(&ctx->inflight_lock);
@@ -2698,7 +2701,7 @@ static inline struct io_kiocb *io_get_linked_timeout(struct io_kiocb *req)
 	return NULL;
 }
 
-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int __io_queue_sqe(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt;
 	int ret;
@@ -2710,7 +2713,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 		goto err;
 	}
 
-	ret = __io_submit_sqe(ctx, req, NULL, true);
+	ret = __io_submit_sqe(req, NULL, true);
 
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2725,7 +2728,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 		if (sqe_copy) {
 			s->sqe = sqe_copy;
 			if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
-				ret = io_grab_files(ctx, req);
+				ret = io_grab_files(req);
 				if (ret) {
 					kfree(sqe_copy);
 					goto err;
@@ -2736,7 +2739,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 			 * Queued up for async execution, worker will release
 			 * submit reference when the iocb is actually submitted.
 			 */
-			io_queue_async_work(ctx, req);
+			io_queue_async_work(req);
 			return 0;
 		}
 	}
@@ -2756,11 +2759,11 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 	return ret;
 }
 
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static int io_queue_sqe(struct io_kiocb *req)
 {
 	int ret;
 
-	ret = io_req_defer(ctx, req);
+	ret = io_req_defer(req);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
 			io_cqring_add_event(req, ret);
@@ -2769,17 +2772,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 		return 0;
 	}
 
-	return __io_queue_sqe(ctx, req);
+	return __io_queue_sqe(req);
 }
 
-static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			      struct io_kiocb *shadow)
+static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
 {
 	int ret;
 	int need_submit = false;
+	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!shadow)
-		return io_queue_sqe(ctx, req);
+		return io_queue_sqe(req);
 
 	/*
 	 * Mark the first IO in link list as DRAIN, let all the following
@@ -2787,7 +2790,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * list.
 	 */
 	req->flags |= REQ_F_IO_DRAIN;
-	ret = io_req_defer(ctx, req);
+	ret = io_req_defer(req);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
 			io_cqring_add_event(req, ret);
@@ -2810,18 +2813,19 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (need_submit)
-		return __io_queue_sqe(ctx, req);
+		return __io_queue_sqe(req);
 
 	return 0;
 }
 
 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
-static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			  struct io_submit_state *state, struct io_kiocb **link)
+static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
+			  struct io_kiocb **link)
 {
 	struct io_uring_sqe *sqe_copy;
 	struct sqe_submit *s = &req->submit;
+	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
 	req->user_data = s->sqe->user_data;
@@ -2832,7 +2836,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		goto err_req;
 	}
 
-	ret = io_req_set_file(ctx, state, req);
+	ret = io_req_set_file(state, req);
 	if (unlikely(ret)) {
 err_req:
 		io_cqring_add_event(req, ret);
@@ -2869,7 +2873,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		ret = -EINVAL;
 		goto err_req;
 	} else {
-		io_queue_sqe(ctx, req);
+		io_queue_sqe(req);
 	}
 }
 
@@ -3018,7 +3022,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->submit.needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
 					  true, async);
-		io_submit_sqe(ctx, req, statep, &link);
+		io_submit_sqe(req, statep, &link);
 		submitted++;
 
 		/*
@@ -3026,14 +3030,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		 * that's the end of the chain. Submit the previous link.
 		 */
 		if (!(sqe_flags & IOSQE_IO_LINK) && link) {
-			io_queue_link_head(ctx, link, shadow_req);
+			io_queue_link_head(link, shadow_req);
 			link = NULL;
 			shadow_req = NULL;
 		}
 	}
 
 	if (link)
-		io_queue_link_head(ctx, link, shadow_req);
+		io_queue_link_head(link, shadow_req);
 	if (statep)
 		io_submit_state_end(&state);
 