Commit ae9428ca authored by Pavel Begunkov, committed by Jens Axboe

io_uring: Merge io_submit_sqes and io_ring_submit

io_submit_sqes() and io_ring_submit() do essentially the same work,
differing only in a few details. Deduplicate them.

Reviewed-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 3aa5fa03
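For orientation, here is a condensed sketch of the two call paths after the merge, distilled from the diff below. The wrapper names are hypothetical and the surrounding kernel context (locking, inflight accounting, error handling) is elided.

```c
/*
 * Hypothetical wrappers illustrating how the two former callers now reach
 * the single io_submit_sqes() with different arguments (see diff below).
 */

/* SQPOLL kernel-thread path: no ring file/fd, async submission, fixed files */
static void sqpoll_submit_sketch(struct io_ring_ctx *ctx, unsigned int to_submit,
                                 struct mm_struct **cur_mm)
{
        io_submit_sqes(ctx, to_submit, NULL, -1, cur_mm, true);
}

/* io_uring_enter() path: caller's ring file/fd, synchronous submission */
static void enter_submit_sketch(struct io_ring_ctx *ctx, unsigned int to_submit,
                                struct file *ring_file, int ring_fd)
{
        /* already have mm, so io_submit_sqes() won't try to grab it */
        struct mm_struct *cur_mm = ctx->sqo_mm;

        io_submit_sqes(ctx, to_submit, ring_file, ring_fd, &cur_mm, false);
}
```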
@@ -2694,7 +2694,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
-                          struct mm_struct **mm)
+                          struct file *ring_file, int ring_fd,
+                          struct mm_struct **mm, bool async)
 {
         struct io_submit_state state, *statep = NULL;
         struct io_kiocb *link = NULL;
@@ -2745,10 +2746,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
                 }
 
 out:
+                s.ring_file = ring_file;
+                s.ring_fd = ring_fd;
                 s.has_user = *mm != NULL;
-                s.in_async = true;
-                s.needs_fixed_file = true;
-                trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, true);
+                s.in_async = async;
+                s.needs_fixed_file = async;
+                trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, async);
                 io_submit_sqe(ctx, &s, statep, &link);
                 submitted++;
         }
@@ -2758,6 +2761,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
         if (statep)
                 io_submit_state_end(&state);
 
+        /* Commit SQ ring head once we've consumed and submitted all SQEs */
+        io_commit_sqring(ctx);
+
         return submitted;
 }
@@ -2862,10 +2868,8 @@ static int io_sq_thread(void *data)
                 }
 
                 to_submit = min(to_submit, ctx->sq_entries);
-                inflight += io_submit_sqes(ctx, to_submit, &cur_mm);
-
-                /* Commit SQ ring head once we've consumed all SQEs */
-                io_commit_sqring(ctx);
+                inflight += io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm,
+                                           true);
         }
 
         set_fs(old_fs);
@@ -2879,69 +2883,6 @@ static int io_sq_thread(void *data)
         return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-                          struct file *ring_file, int ring_fd)
-{
-        struct io_submit_state state, *statep = NULL;
-        struct io_kiocb *link = NULL;
-        struct io_kiocb *shadow_req = NULL;
-        bool prev_was_link = false;
-        int i, submit = 0;
-
-        if (to_submit > IO_PLUG_THRESHOLD) {
-                io_submit_state_start(&state, ctx, to_submit);
-                statep = &state;
-        }
-
-        for (i = 0; i < to_submit; i++) {
-                struct sqe_submit s;
-
-                if (!io_get_sqring(ctx, &s))
-                        break;
-
-                /*
-                 * If previous wasn't linked and we have a linked command,
-                 * that's the end of the chain. Submit the previous link.
-                 */
-                if (!prev_was_link && link) {
-                        io_queue_link_head(ctx, link, &link->submit, shadow_req);
-                        link = NULL;
-                        shadow_req = NULL;
-                }
-                prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
-
-                if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
-                        if (!shadow_req) {
-                                shadow_req = io_get_req(ctx, NULL);
-                                if (unlikely(!shadow_req))
-                                        goto out;
-                                shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
-                                refcount_dec(&shadow_req->refs);
-                        }
-                        shadow_req->sequence = s.sequence;
-                }
-
-out:
-                s.ring_file = ring_file;
-                s.has_user = true;
-                s.in_async = false;
-                s.needs_fixed_file = false;
-                s.ring_fd = ring_fd;
-                submit++;
-                trace_io_uring_submit_sqe(ctx, s.sqe->user_data, true, false);
-                io_submit_sqe(ctx, &s, statep, &link);
-        }
-
-        if (link)
-                io_queue_link_head(ctx, link, &link->submit, shadow_req);
-        if (statep)
-                io_submit_state_end(statep);
-
-        io_commit_sqring(ctx);
-
-        return submit;
-}
-
 struct io_wait_queue {
         struct wait_queue_entry wq;
         struct io_ring_ctx *ctx;
@@ -4062,10 +4003,14 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                 wake_up(&ctx->sqo_wait);
                 submitted = to_submit;
         } else if (to_submit) {
-                to_submit = min(to_submit, ctx->sq_entries);
+                struct mm_struct *cur_mm;
 
+                to_submit = min(to_submit, ctx->sq_entries);
                 mutex_lock(&ctx->uring_lock);
-                submitted = io_ring_submit(ctx, to_submit, f.file, fd);
+                /* already have mm, so io_submit_sqes() won't try to grab it */
+                cur_mm = ctx->sqo_mm;
+                submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
+                                           &cur_mm, false);
                 mutex_unlock(&ctx->uring_lock);
         }
         if (flags & IORING_ENTER_GETEVENTS) {
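To make the effect of the new bool async parameter concrete, here is a minimal sketch of the per-SQE field setup inside the merged io_submit_sqes(), assuming only the sqe_submit fields visible in the hunks above. The helper name is hypothetical; the real function also handles links, drain shadow requests, and submission state.

```c
/*
 * Hypothetical helper condensing the per-SQE setup from the merged
 * io_submit_sqes(); 'async' is true on the SQPOLL-thread path and false
 * when called from io_uring_enter().
 */
static void sqe_submit_setup_sketch(struct sqe_submit *s, struct file *ring_file,
                                    int ring_fd, struct mm_struct *mm, bool async)
{
        s->ring_file = ring_file;
        s->ring_fd = ring_fd;
        s->has_user = (mm != NULL);
        s->in_async = async;            /* was hard-coded true/false before the merge */
        s->needs_fixed_file = async;    /* fixed files only for the SQPOLL thread */
}
```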