Commit 48ce3a8a authored by Jens Axboe, committed by Cheng Jian

io_uring: remove 'state' argument from io_{read,write} path

mainline inclusion
from mainline-5.1-rc7
commit 8358e3a8
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA
---------------------------

Since commit 09bb8394 we don't use the state argument for any sort
of on-stack caching in the io read and write path. Remove the stale
and unused argument from them, and bubble it up to __io_submit_sqe()
and down to io_prep_rw().
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent: 88e4efbe
@@ -740,7 +740,7 @@ static bool io_file_supports_async(struct file *file)
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-		      bool force_nonblock, struct io_submit_state *state)
+		      bool force_nonblock)
 {
 	const struct io_uring_sqe *sqe = s->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -935,7 +935,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 }
 
 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
-		   bool force_nonblock, struct io_submit_state *state)
+		   bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -944,7 +944,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	int ret;
 
-	ret = io_prep_rw(req, s, force_nonblock, state);
+	ret = io_prep_rw(req, s, force_nonblock);
 	if (ret)
 		return ret;
 	file = kiocb->ki_filp;
@@ -982,7 +982,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 }
 
 static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
-		    bool force_nonblock, struct io_submit_state *state)
+		    bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -991,7 +991,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	int ret;
 
-	ret = io_prep_rw(req, s, force_nonblock, state);
+	ret = io_prep_rw(req, s, force_nonblock);
 	if (ret)
 		return ret;
 
@@ -1333,8 +1333,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 }
 
 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			   const struct sqe_submit *s, bool force_nonblock,
-			   struct io_submit_state *state)
+			   const struct sqe_submit *s, bool force_nonblock)
 {
 	int ret, opcode;
 
@@ -1350,18 +1349,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	case IORING_OP_READV:
 		if (unlikely(s->sqe->buf_index))
 			return -EINVAL;
-		ret = io_read(req, s, force_nonblock, state);
+		ret = io_read(req, s, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
 		if (unlikely(s->sqe->buf_index))
 			return -EINVAL;
-		ret = io_write(req, s, force_nonblock, state);
+		ret = io_write(req, s, force_nonblock);
 		break;
 	case IORING_OP_READ_FIXED:
-		ret = io_read(req, s, force_nonblock, state);
+		ret = io_read(req, s, force_nonblock);
 		break;
 	case IORING_OP_WRITE_FIXED:
-		ret = io_write(req, s, force_nonblock, state);
+		ret = io_write(req, s, force_nonblock);
 		break;
 	case IORING_OP_FSYNC:
 		ret = io_fsync(req, s->sqe, force_nonblock);
@@ -1454,7 +1453,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 		s->has_user = cur_mm != NULL;
 		s->needs_lock = true;
 		do {
-			ret = __io_submit_sqe(ctx, req, s, false, NULL);
+			ret = __io_submit_sqe(ctx, req, s, false);
 			/*
 			 * We can get EAGAIN for polled IO even though
 			 * we're forcing a sync submission from here,
@@ -1620,7 +1619,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 	if (unlikely(ret))
 		goto out;
 
-	ret = __io_submit_sqe(ctx, req, s, true, state);
+	ret = __io_submit_sqe(ctx, req, s, true);
 	if (ret == -EAGAIN) {
 		struct io_uring_sqe *sqe_copy;
...
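
For readers skimming the patch, the shape of the change can be shown in a standalone sketch. This is not the kernel code itself, just a minimal, compilable illustration of the same refactor: a parameter that no callee consumes any more is removed from every signature along the call chain instead of being threaded through as dead weight. The function names mirror the patch; the bodies are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* After the patch: io_prep_rw() no longer receives the unused
 * io_submit_state pointer, only the flag it actually reads. */
static int io_prep_rw(bool force_nonblock)
{
	printf("prep (nonblock=%d)\n", force_nonblock);
	return 0;
}

/* io_read()/io_write() shrink the same way; they just forward the flag. */
static int io_read(bool force_nonblock)
{
	return io_prep_rw(force_nonblock);
}

/* Top of the chain: the async worker used to pass NULL for state and
 * io_submit_sqe() passed its local state; now neither passes anything. */
static int __io_submit_sqe(bool force_nonblock)
{
	return io_read(force_nonblock);
}

int main(void)
{
	return __io_submit_sqe(true);
}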