Commit 007dcf2e authored by Pavel Begunkov, committed by Shile Zhang

io_uring: use inlined struct sqe_submit

commit 267bc90442aa47002e2991f7d9dd141e168b466b upstream.

req->submit is always up-to-date, use it directly
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent fc1a5d21
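The change follows one pattern throughout: every helper that used to take a separate struct sqe_submit * (or its sqe field) now reads the copy embedded in the request, req->submit, which the submission path keeps current. A minimal userspace sketch of that pattern follows; it is illustrative only, with simplified stand-in structs and hypothetical names (io_prep_rw_old/io_prep_rw_new), not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structs (illustrative only). */
struct sqe_submit {
	const char *sqe;	/* stands in for struct io_uring_sqe * */
	bool in_async;
};

struct io_kiocb {
	struct sqe_submit submit;	/* inlined and always up-to-date */
};

/* Before: callers threaded a separate sqe_submit pointer everywhere. */
static int io_prep_rw_old(struct io_kiocb *req, const struct sqe_submit *s,
			  bool force_nonblock)
{
	(void)req; (void)force_nonblock;	/* unused in this sketch */
	return s->sqe ? 0 : -1;
}

/* After: one parameter fewer; the state is read from req itself. */
static int io_prep_rw_new(struct io_kiocb *req, bool force_nonblock)
{
	const struct sqe_submit *s = &req->submit;

	(void)force_nonblock;			/* unused in this sketch */
	return s->sqe ? 0 : -1;
}

int main(void)
{
	struct io_kiocb req = { .submit = { .sqe = "sqe", .in_async = false } };

	/* Both forms see the same data; the new one has a single source of truth. */
	printf("old: %d new: %d\n",
	       io_prep_rw_old(&req, &req.submit, true),
	       io_prep_rw_new(&req, true));
	return 0;
}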
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1154,10 +1154,9 @@ static bool io_file_supports_async(struct file *file)
 	return false;
 }
 
-static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-		      bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
 {
-	const struct io_uring_sqe *sqe = s->sqe;
+	const struct io_uring_sqe *sqe = req->submit.sqe;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct kiocb *kiocb = &req->rw;
 	unsigned ioprio;
@@ -1406,8 +1405,8 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
 	return ret;
 }
 
-static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
-		   struct io_kiocb **nxt, bool force_nonblock)
+static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
+		   bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -1416,7 +1415,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	ssize_t read_size, ret;
 
-	ret = io_prep_rw(req, s, force_nonblock);
+	ret = io_prep_rw(req, force_nonblock);
 	if (ret)
 		return ret;
 	file = kiocb->ki_filp;
@@ -1424,7 +1423,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	if (unlikely(!(file->f_mode & FMODE_READ)))
 		return -EBADF;
 
-	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
+	ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
 	if (ret < 0)
 		return ret;
 
@@ -1456,7 +1455,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 			ret2 = -EAGAIN;
 		/* Catch -EAGAIN return for forced non-blocking submission */
 		if (!force_nonblock || ret2 != -EAGAIN)
-			kiocb_done(kiocb, ret2, nxt, s->in_async);
+			kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
 		else
 			ret = -EAGAIN;
 	}
@@ -1464,8 +1463,8 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	return ret;
 }
 
-static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
-		    struct io_kiocb **nxt, bool force_nonblock)
+static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
+		    bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -1474,7 +1473,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	ssize_t ret;
 
-	ret = io_prep_rw(req, s, force_nonblock);
+	ret = io_prep_rw(req, force_nonblock);
 	if (ret)
 		return ret;
 	file = kiocb->ki_filp;
@@ -1482,7 +1481,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	if (unlikely(!(file->f_mode & FMODE_WRITE)))
 		return -EBADF;
 
-	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
+	ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
 	if (ret < 0)
 		return ret;
 
@@ -1519,7 +1518,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 		else
 			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
 		if (!force_nonblock || ret2 != -EAGAIN)
-			kiocb_done(kiocb, ret2, nxt, s->in_async);
+			kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
 		else
 			ret = -EAGAIN;
 	}
@@ -2188,9 +2187,9 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	return 0;
 }
 
-static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			const struct io_uring_sqe *sqe)
+static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req)
 {
+	const struct io_uring_sqe *sqe = req->submit.sqe;
 	struct io_uring_sqe *sqe_copy;
 
 	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
@@ -2217,10 +2216,10 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			   const struct sqe_submit *s, struct io_kiocb **nxt,
-			   bool force_nonblock)
+			   struct io_kiocb **nxt, bool force_nonblock)
 {
 	int ret, opcode;
+	struct sqe_submit *s = &req->submit;
 
 	req->user_data = READ_ONCE(s->sqe->user_data);
 
@@ -2232,18 +2231,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	case IORING_OP_READV:
 		if (unlikely(s->sqe->buf_index))
 			return -EINVAL;
-		ret = io_read(req, s, nxt, force_nonblock);
+		ret = io_read(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
 		if (unlikely(s->sqe->buf_index))
 			return -EINVAL;
-		ret = io_write(req, s, nxt, force_nonblock);
+		ret = io_write(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_READ_FIXED:
-		ret = io_read(req, s, nxt, force_nonblock);
+		ret = io_read(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_WRITE_FIXED:
-		ret = io_write(req, s, nxt, force_nonblock);
+		ret = io_write(req, nxt, force_nonblock);
 		break;
 	case IORING_OP_FSYNC:
 		ret = io_fsync(req, s->sqe, nxt, force_nonblock);
@@ -2318,7 +2317,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
 		s->in_async = true;
 		do {
-			ret = __io_submit_sqe(ctx, req, s, &nxt, false);
+			ret = __io_submit_sqe(ctx, req, &nxt, false);
 			/*
 			 * We can get EAGAIN for polled IO even though we're
 			 * forcing a sync submission from here, since we can't
@@ -2372,9 +2371,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
 	return table->files[index & IORING_FILE_TABLE_MASK];
 }
 
-static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+static int io_req_set_file(struct io_ring_ctx *ctx,
 			   struct io_submit_state *state, struct io_kiocb *req)
 {
+	struct sqe_submit *s = &req->submit;
 	unsigned flags;
 	int fd;
 
@@ -2438,12 +2438,11 @@ static int io_grab_files(struct io_ring_ctx *ctx, struct io_kiocb *req)
 	return ret;
 }
 
-static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			  struct sqe_submit *s)
+static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 {
 	int ret;
 
-	ret = __io_submit_sqe(ctx, req, s, NULL, true);
+	ret = __io_submit_sqe(ctx, req, NULL, true);
 
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2451,6 +2450,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 */
 	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
 	    (req->flags & REQ_F_MUST_PUNT))) {
+		struct sqe_submit *s = &req->submit;
 		struct io_uring_sqe *sqe_copy;
 
 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
@@ -2488,31 +2488,30 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	return ret;
 }
 
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			struct sqe_submit *s)
+static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req)
 {
 	int ret;
 
-	ret = io_req_defer(ctx, req, s->sqe);
+	ret = io_req_defer(ctx, req);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
+			io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
 			io_free_req(req, NULL);
-			io_cqring_add_event(ctx, s->sqe->user_data, ret);
 		}
 		return 0;
 	}
 
-	return __io_queue_sqe(ctx, req, s);
+	return __io_queue_sqe(ctx, req);
 }
 
 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			      struct sqe_submit *s, struct io_kiocb *shadow)
+			      struct io_kiocb *shadow)
 {
 	int ret;
 	int need_submit = false;
 
 	if (!shadow)
-		return io_queue_sqe(ctx, req, s);
+		return io_queue_sqe(ctx, req);
 
 	/*
 	 * Mark the first IO in link list as DRAIN, let all the following
@@ -2520,12 +2519,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * list.
 	 */
 	req->flags |= REQ_F_IO_DRAIN;
-	ret = io_req_defer(ctx, req, s->sqe);
+	ret = io_req_defer(ctx, req);
 	if (ret) {
 		if (ret != -EIOCBQUEUED) {
+			io_cqring_add_event(ctx, req->submit.sqe->user_data, ret);
 			io_free_req(req, NULL);
 			__io_free_req(shadow);
-			io_cqring_add_event(ctx, s->sqe->user_data, ret);
 			return 0;
 		}
 	} else {
@@ -2543,7 +2542,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (need_submit)
-		return __io_queue_sqe(ctx, req, s);
+		return __io_queue_sqe(ctx, req);
 
 	return 0;
 }
@@ -2551,10 +2550,10 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
 static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			  struct sqe_submit *s, struct io_submit_state *state,
-			  struct io_kiocb **link)
+			  struct io_submit_state *state, struct io_kiocb **link)
 {
 	struct io_uring_sqe *sqe_copy;
+	struct sqe_submit *s = &req->submit;
 	int ret;
 
 	/* enforce forwards compatibility on users */
@@ -2563,11 +2562,11 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		goto err_req;
 	}
 
-	ret = io_req_set_file(ctx, s, state, req);
+	ret = io_req_set_file(ctx, state, req);
 	if (unlikely(ret)) {
 err_req:
-		io_free_req(req, NULL);
 		io_cqring_add_event(ctx, s->sqe->user_data, ret);
+		io_free_req(req, NULL);
 		return;
 	}
 
@@ -2598,7 +2597,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		INIT_LIST_HEAD(&req->link_list);
 		*link = req;
 	} else {
-		io_queue_sqe(ctx, req, s);
+		io_queue_sqe(ctx, req);
 	}
 }
 
@@ -2742,7 +2741,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->submit.needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
 					  true, async);
-		io_submit_sqe(ctx, req, &req->submit, statep, &link);
+		io_submit_sqe(ctx, req, statep, &link);
 		submitted++;
 
 		/*
@@ -2750,14 +2749,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		 * that's the end of the chain. Submit the previous link.
 		 */
 		if (!(sqe_flags & IOSQE_IO_LINK) && link) {
-			io_queue_link_head(ctx, link, &link->submit, shadow_req);
+			io_queue_link_head(ctx, link, shadow_req);
 			link = NULL;
 			shadow_req = NULL;
 		}
 	}
 
 	if (link)
-		io_queue_link_head(ctx, link, &link->submit, shadow_req);
+		io_queue_link_head(ctx, link, shadow_req);
 	if (statep)
 		io_submit_state_end(&state);
 