Commit 9990b323 authored by Pavel Begunkov, committed by Xiaoguang Wang

io_uring: early submission req fail code

to #28170604

commit 1d4240cc9e7bb101dac58f30283fa24a809f5606 upstream

Having only one place for cleaning up a request after a link assembly/
submission failure will come in handy in the future. At the very least,
it allows us to remove a duplicated cleanup sequence.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent c8acf67a
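
The change is easiest to see as a pattern: error paths inside the submission function stop jumping to a local cleanup label and instead return a plain error code, while the single caller posts the failed completion and drops the request references in one place. Below is a minimal, self-contained C sketch of that pattern; the names (submit_one, fail_req_cleanup) and the simplified flag checks and "cleanup" are illustrative stand-ins, not the kernel code itself.

#include <errno.h>
#include <stdio.h>

struct req {
	int id;
};

/*
 * After the refactoring: validation failures return an error code
 * immediately instead of jumping to a local "err_req:" label that
 * duplicated the cleanup inside this function.
 */
static int submit_one(struct req *r, unsigned int flags)
{
	if (flags & ~0x3fu)		/* unknown flag bits set */
		return -EINVAL;
	if (flags & 0x20u)		/* feature this op doesn't support */
		return -EOPNOTSUPP;

	printf("req %d submitted\n", r->id);
	return 0;
}

/*
 * The one and only cleanup site, analogous to the fail_req handling in
 * the submission loop: post the failed completion, drop the references.
 */
static void fail_req_cleanup(struct req *r, int err)
{
	printf("req %d failed with %d: post cqe, drop refs\n", r->id, err);
}

int main(void)
{
	struct req r = { .id = 1 };
	int err = submit_one(&r, 0x20u);	/* triggers -EOPNOTSUPP */

	if (err)
		fail_req_cleanup(&r, err);	/* cleanup happens exactly once */
	return 0;
}
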
fs/io_uring.c
@@ -5608,7 +5608,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 			IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
 			IOSQE_BUFFER_SELECT)
 
-static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			  struct io_submit_state *state, struct io_kiocb **link)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -5618,24 +5618,18 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	sqe_flags = READ_ONCE(sqe->flags);
 	/* enforce forwards compatibility on users */
-	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
-		ret = -EINVAL;
-		goto err_req;
-	}
+	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
+		return -EINVAL;
 
 	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
-	    !io_op_defs[req->opcode].buffer_select) {
-		ret = -EOPNOTSUPP;
-		goto err_req;
-	}
+	    !io_op_defs[req->opcode].buffer_select)
+		return -EOPNOTSUPP;
 
 	id = READ_ONCE(sqe->personality);
 	if (id) {
 		req->work.creds = idr_find(&ctx->personality_idr, id);
-		if (unlikely(!req->work.creds)) {
-			ret = -EINVAL;
-			goto err_req;
-		}
+		if (unlikely(!req->work.creds))
+			return -EINVAL;
 		get_cred(req->work.creds);
 	}
 
@@ -5646,12 +5640,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
 	fd = READ_ONCE(sqe->fd);
 	ret = io_req_set_file(state, req, fd, sqe_flags);
-	if (unlikely(ret)) {
-err_req:
-		io_cqring_add_event(req, ret);
-		io_double_put_req(req);
-		return false;
-	}
+	if (unlikely(ret))
+		return ret;
 
 	/*
 	 * If we already have a head request, queue this one for async
@@ -5674,16 +5664,14 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			head->flags |= REQ_F_IO_DRAIN;
 			ctx->drain_next = 1;
 		}
-		if (io_alloc_async_ctx(req)) {
-			ret = -EAGAIN;
-			goto err_req;
-		}
+		if (io_alloc_async_ctx(req))
+			return -EAGAIN;
 
 		ret = io_req_defer_prep(req, sqe);
 		if (ret) {
 			/* fail even hard links since we don't submit */
 			head->flags |= REQ_F_FAIL_LINK;
-			goto err_req;
+			return ret;
 		}
 		trace_io_uring_link(ctx, req, head);
 		list_add_tail(&req->link_list, &head->link_list);
@@ -5702,10 +5690,9 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			req->flags |= REQ_F_LINK;
 			INIT_LIST_HEAD(&req->link_list);
 
-			if (io_alloc_async_ctx(req)) {
-				ret = -EAGAIN;
-				goto err_req;
-			}
+			if (io_alloc_async_ctx(req))
+				return -EAGAIN;
+
 			ret = io_req_defer_prep(req, sqe);
 			if (ret)
 				req->flags |= REQ_F_FAIL_LINK;
@@ -5715,7 +5702,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 	}
 
-	return true;
+	return 0;
 }
 
 /*
@@ -5880,8 +5867,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
 						true, async);
-		if (!io_submit_sqe(req, sqe, statep, &link))
-			break;
+		err = io_submit_sqe(req, sqe, statep, &link);
+		if (err)
+			goto fail_req;
 	}
 
 	if (unlikely(submitted != nr)) {
......
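
Two things fall out of the change, per the commit message and the visible hunks: io_submit_sqe() now reports which error occurred (an int instead of a bare bool), and the duplicated io_cqring_add_event()/io_double_put_req() sequence is gone from the function itself. Every failure in the submission loop is funneled through `goto fail_req` to a single cleanup site in io_submit_sqes(); the body of that label falls in the truncated portion of the diff above, but by the commit's stated intent it is the one place that posts the failed completion and drops the request references.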