Commit cdecb959 authored by Pavel Begunkov, committed by Cheng Jian

io_uring: optimise sqe-to-req flags translation

mainline inclusion
from mainline-5.6-rc1
commit 6b47ee6e
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA
---------------------------

For each IOSQE_* flag there is a corresponding REQ_F_* flag, and
translating one into the other follows a repetitive pattern:
e.g. if (sqe->flags & SQE_FLAG*) req->flags |= REQ_F_FLAG*

Use the same numeric values/bits for both, so the flags can be copied
in one masked assignment instead of being handled one by one.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent f0d5c830
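The core idea, shown as a self-contained C sketch before the diff (illustration only, not the kernel sources; BIT(), the trimmed-down flag set and the file name are assumptions made for the example): once every REQ_F_* value is defined at the same bit position as its IOSQE_* counterpart, the chain of per-flag branches collapses into a single masked copy.

/* flag_alias_sketch.c — hypothetical standalone demo; build with: cc -std=c11 flag_alias_sketch.c */
#include <assert.h>
#include <stdio.h>

/* Submission-side flag bit positions (mirroring the uapi enum added below). */
enum {
        IOSQE_FIXED_FILE_BIT,
        IOSQE_IO_DRAIN_BIT,
        IOSQE_IO_LINK_BIT,
        IOSQE_IO_HARDLINK_BIT,
        IOSQE_ASYNC_BIT,
};

#define BIT(n)                  (1U << (n))

#define IOSQE_IO_DRAIN          BIT(IOSQE_IO_DRAIN_BIT)
#define IOSQE_IO_HARDLINK       BIT(IOSQE_IO_HARDLINK_BIT)
#define IOSQE_ASYNC             BIT(IOSQE_ASYNC_BIT)

/* Internal request flags, deliberately aliased to the same bit positions. */
enum {
        REQ_F_IO_DRAIN          = BIT(IOSQE_IO_DRAIN_BIT),
        REQ_F_HARDLINK          = BIT(IOSQE_IO_HARDLINK_BIT),
        REQ_F_FORCE_ASYNC       = BIT(IOSQE_ASYNC_BIT),
};

/* Old pattern: one test-and-set branch per flag. */
static unsigned int translate_branchy(unsigned int sqe_flags)
{
        unsigned int req_flags = 0;

        if (sqe_flags & IOSQE_IO_DRAIN)
                req_flags |= REQ_F_IO_DRAIN;
        if (sqe_flags & IOSQE_IO_HARDLINK)
                req_flags |= REQ_F_HARDLINK;
        if (sqe_flags & IOSQE_ASYNC)
                req_flags |= REQ_F_FORCE_ASYNC;
        return req_flags;
}

/* New pattern: the bits line up, so copy them through one mask. */
static unsigned int translate_copy(unsigned int sqe_flags)
{
        return sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK | IOSQE_ASYNC);
}

int main(void)
{
        /* Brute-force every combination of the five flag bits. */
        for (unsigned int f = 0; f < 32; f++)
                assert(translate_branchy(f) == translate_copy(f));
        printf("branchy and masked-copy translations agree\n");
        return 0;
}

The loop in main() only demonstrates that the masked copy and the old branch-per-flag code produce the same result for every flag combination; the patch below applies the same reasoning to the real REQ_F_*/IOSQE_* definitions.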
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -46,6 +46,7 @@
 #include <linux/compat.h>
 #include <linux/refcount.h>
 #include <linux/uio.h>
+#include <linux/bits.h>
 
 #include <linux/sched/signal.h>
 #include <linux/fs.h>
@@ -453,6 +454,65 @@ struct io_async_ctx {
         };
 };
 
+enum {
+        REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+        REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+        REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+        REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+        REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+
+        REQ_F_LINK_NEXT_BIT,
+        REQ_F_FAIL_LINK_BIT,
+        REQ_F_INFLIGHT_BIT,
+        REQ_F_CUR_POS_BIT,
+        REQ_F_NOWAIT_BIT,
+        REQ_F_IOPOLL_COMPLETED_BIT,
+        REQ_F_LINK_TIMEOUT_BIT,
+        REQ_F_TIMEOUT_BIT,
+        REQ_F_ISREG_BIT,
+        REQ_F_MUST_PUNT_BIT,
+        REQ_F_TIMEOUT_NOSEQ_BIT,
+        REQ_F_COMP_LOCKED_BIT,
+};
+
+enum {
+        /* ctx owns file */
+        REQ_F_FIXED_FILE = BIT(REQ_F_FIXED_FILE_BIT),
+        /* drain existing IO first */
+        REQ_F_IO_DRAIN = BIT(REQ_F_IO_DRAIN_BIT),
+        /* linked sqes */
+        REQ_F_LINK = BIT(REQ_F_LINK_BIT),
+        /* doesn't sever on completion < 0 */
+        REQ_F_HARDLINK = BIT(REQ_F_HARDLINK_BIT),
+        /* IOSQE_ASYNC */
+        REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
+
+        /* already grabbed next link */
+        REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
+        /* fail rest of links */
+        REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
+        /* on inflight list */
+        REQ_F_INFLIGHT = BIT(REQ_F_INFLIGHT_BIT),
+        /* read/write uses file position */
+        REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
+        /* must not punt to workers */
+        REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
+        /* polled IO has completed */
+        REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
+        /* has linked timeout */
+        REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
+        /* timeout request */
+        REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
+        /* regular file */
+        REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
+        /* must be punted even for NONBLOCK */
+        REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
+        /* no timeout sequence */
+        REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
+        /* completion under lock */
+        REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
+};
+
 /*
  * NOTE! Each of the iocb union members has the file pointer
  * as the first entry in their struct definition. So you can
@@ -495,23 +555,6 @@ struct io_kiocb {
         struct list_head link_list;
         unsigned int flags;
         refcount_t refs;
-#define REQ_F_NOWAIT 1 /* must not punt to workers */
-#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
-#define REQ_F_FIXED_FILE 4 /* ctx owns file */
-#define REQ_F_LINK_NEXT 8 /* already grabbed next link */
-#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
-#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
-#define REQ_F_FAIL_LINK 256 /* fail rest of links */
-#define REQ_F_TIMEOUT 1024 /* timeout request */
-#define REQ_F_ISREG 2048 /* regular file */
-#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
-#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
-#define REQ_F_INFLIGHT 16384 /* on inflight list */
-#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
-#define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */
-#define REQ_F_FORCE_ASYNC 131072 /* IOSQE_ASYNC */
-#define REQ_F_CUR_POS 262144 /* read/write uses file position */
         u64 user_data;
         u32 result;
         u32 sequence;
@@ -4296,9 +4339,6 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
         flags = READ_ONCE(sqe->flags);
         fd = READ_ONCE(sqe->fd);
 
-        if (flags & IOSQE_IO_DRAIN)
-                req->flags |= REQ_F_IO_DRAIN;
-
         if (!io_req_needs_file(req, fd))
                 return 0;
 
@@ -4534,8 +4574,9 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                 ret = -EINVAL;
                 goto err_req;
         }
 
-        if (sqe_flags & IOSQE_ASYNC)
-                req->flags |= REQ_F_FORCE_ASYNC;
+        /* same numerical values with corresponding REQ_F_*, safe to copy */
+        req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
+                                        IOSQE_ASYNC);
 
         ret = io_req_set_file(state, req, sqe);
         if (unlikely(ret)) {
@@ -4559,10 +4600,6 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                         head->flags |= REQ_F_IO_DRAIN;
                         ctx->drain_next = 1;
                 }
-
-                if (sqe_flags & IOSQE_IO_HARDLINK)
-                        req->flags |= REQ_F_HARDLINK;
-
                 if (io_alloc_async_ctx(req)) {
                         ret = -EAGAIN;
                         goto err_req;
@@ -4589,9 +4626,6 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
                 }
                 if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                         req->flags |= REQ_F_LINK;
-                        if (sqe_flags & IOSQE_IO_HARDLINK)
-                                req->flags |= REQ_F_HARDLINK;
-
                         INIT_LIST_HEAD(&req->link_list);
                         ret = io_req_defer_prep(req, sqe);
                         if (ret)
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -45,14 +45,27 @@ struct io_uring_sqe {
         };
 };
 
+enum {
+        IOSQE_FIXED_FILE_BIT,
+        IOSQE_IO_DRAIN_BIT,
+        IOSQE_IO_LINK_BIT,
+        IOSQE_IO_HARDLINK_BIT,
+        IOSQE_ASYNC_BIT,
+};
+
 /*
  * sqe->flags
  */
-#define IOSQE_FIXED_FILE (1U << 0) /* use fixed fileset */
-#define IOSQE_IO_DRAIN (1U << 1) /* issue after inflight IO */
-#define IOSQE_IO_LINK (1U << 2) /* links next sqe */
-#define IOSQE_IO_HARDLINK (1U << 3) /* like LINK, but stronger */
-#define IOSQE_ASYNC (1U << 4) /* always go async */
+/* use fixed fileset */
+#define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT)
+/* issue after inflight IO */
+#define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT)
+/* links next sqe */
+#define IOSQE_IO_LINK (1U << IOSQE_IO_LINK_BIT)
+/* like LINK, but stronger */
+#define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT)
+/* always go async */
+#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
 
 /*
  * io_uring_setup() flags
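Nothing in the patch enforces the aliasing at build time; it relies on the REQ_F_* enum being written in terms of the IOSQE_*_BIT values. As a hypothetical add-on (not part of this patch; the names and values are repeated here only for illustration), the invariant that the in-line "safe to copy" comment depends on could be pinned down with C11 static assertions — in-kernel code would typically reach for BUILD_BUG_ON() instead:

/* alias_check.c — hypothetical compile-time check, not from this patch. */

/* uapi side, spelled out the way include/uapi/linux/io_uring.h defines it */
#define IOSQE_IO_DRAIN          (1U << 1)
#define IOSQE_IO_HARDLINK       (1U << 3)
#define IOSQE_ASYNC             (1U << 4)

/* kernel-internal side, aliased by bit position as in the patch */
enum {
        REQ_F_IO_DRAIN          = 1U << 1,
        REQ_F_HARDLINK          = 1U << 3,
        REQ_F_FORCE_ASYNC       = 1U << 4,
};

/* If either side is ever renumbered, the build fails here instead of
 * request flags being silently mistranslated at submission time. */
_Static_assert(REQ_F_IO_DRAIN == IOSQE_IO_DRAIN, "IO_DRAIN bits diverged");
_Static_assert(REQ_F_HARDLINK == IOSQE_IO_HARDLINK, "HARDLINK bits diverged");
_Static_assert(REQ_F_FORCE_ASYNC == IOSQE_ASYNC, "ASYNC bits diverged");

int main(void) { return 0; }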