Commit f5c255d2 authored by Bob Liu, committed by Cheng Jian

io_uring: introduce req_need_defer()

mainline inclusion
from mainline-5.5-rc1
commit 9d858b21
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=27
CVE: NA
---------------------------

Makes the code easier to read.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
Signed-off-by: yangerkun <yangerkun@huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Parent 6214622b
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -448,7 +448,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return NULL;
 }
 
-static inline bool __io_sequence_defer(struct io_kiocb *req)
+static inline bool __req_need_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -456,12 +456,12 @@ static inline bool __io_sequence_defer(struct io_kiocb *req)
 			+ atomic_read(&ctx->cached_cq_overflow);
 }
 
-static inline bool io_sequence_defer(struct io_kiocb *req)
+static inline bool req_need_defer(struct io_kiocb *req)
 {
-	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
-		return false;
+	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
+		return __req_need_defer(req);
 
-	return __io_sequence_defer(req);
+	return false;
 }
 
 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -469,7 +469,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
 	struct io_kiocb *req;
 
 	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
-	if (req && !io_sequence_defer(req)) {
+	if (req && !req_need_defer(req)) {
 		list_del_init(&req->list);
 		return req;
 	}
@@ -485,7 +485,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 	if (req) {
 		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
 			return NULL;
-		if (!__io_sequence_defer(req)) {
+		if (!__req_need_defer(req)) {
 			list_del_init(&req->list);
 			return req;
 		}
@@ -2450,7 +2450,8 @@ static int io_req_defer(struct io_kiocb *req)
 	struct io_uring_sqe *sqe_copy;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list))
+	/* Still need defer if there is pending req in defer list. */
+	if (!req_need_defer(req) && list_empty(&ctx->defer_list))
 		return 0;
 
 	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
@@ -2458,7 +2459,7 @@ static int io_req_defer(struct io_kiocb *req)
 		return -EAGAIN;
 
 	spin_lock_irq(&ctx->completion_lock);
-	if (!io_sequence_defer(req) && list_empty(&ctx->defer_list)) {
+	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		kfree(sqe_copy);
 		return 0;
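
The readability gain comes from inverting the flag check: instead of early-returning false on the negated mask comparison, the renamed req_need_defer() states the positive condition directly, i.e. a request needs sequence-based deferral only while REQ_F_IO_DRAIN is set and REQ_F_IO_DRAINED is not yet set. Below is a minimal user-space sketch of that mask-and-compare pattern, not the kernel code itself: the flag values, the fake_req struct, and fake_sequence_check() (standing in for __req_need_defer()) are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

#define REQ_F_IO_DRAIN   (1U << 0)  /* drain requested (illustrative value) */
#define REQ_F_IO_DRAINED (1U << 1)  /* drain already satisfied (illustrative) */

struct fake_req { unsigned int flags; };

/* Stand-in for __req_need_defer(): pretend the sequence check says "defer". */
static bool fake_sequence_check(const struct fake_req *req)
{
	(void)req;
	return true;
}

static bool req_need_defer(const struct fake_req *req)
{
	/*
	 * Masking both bits and comparing the result to REQ_F_IO_DRAIN is
	 * true only when DRAIN is set and DRAINED is clear.
	 */
	if ((req->flags & (REQ_F_IO_DRAIN | REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
		return fake_sequence_check(req);

	return false;
}

int main(void)
{
	struct fake_req a = { .flags = REQ_F_IO_DRAIN };
	struct fake_req b = { .flags = REQ_F_IO_DRAIN | REQ_F_IO_DRAINED };
	struct fake_req c = { .flags = 0 };

	printf("drain only:      %d\n", req_need_defer(&a)); /* prints 1 */
	printf("drain + drained: %d\n", req_need_defer(&b)); /* prints 0 */
	printf("no drain flags:  %d\n", req_need_defer(&c)); /* prints 0 */
	return 0;
}

Both versions compute the same result; the rewrite simply puts the interesting case (the deferral path) in the if-body, which reads more naturally than the double negation in the old io_sequence_defer().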