Commit 4b4f9111 authored by Jens Axboe, committed by Shile Zhang

io_uring: fix up O_NONBLOCK handling for sockets

commit 491381ce07ca57f68c49c79a8a43da5b60749e32 upstream.

We've got two issues with the non-regular file handling for non-blocking
IO:

1) We don't want to re-do a short read in full for a non-regular file,
   as we can't just read the data again.
2) For non-regular files that don't support non-blocking IO attempts,
   we need to punt to async context even if the file is opened as
   non-blocking. Otherwise the caller always gets -EAGAIN.

Add two new request flags to handle these cases. One is just a cache
of the inode S_ISREG() status, the other tells io_uring that we always
need to punt this request to async context, even if REQ_F_NOWAIT is set.
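
To see the second issue from userspace, here is a minimal hypothetical
reproducer sketch (not part of this commit; it assumes liburing is
installed and uses a socketpair to stand in for any non-regular file
opened O_NONBLOCK). On affected kernels the CQE carries -EAGAIN because
the inline non-blocking attempt is completed instead of being punted;
with this fix the request is punted to async context and completes once
data arrives:

    /* Hypothetical reproducer sketch, assuming liburing is available. */
    #include <liburing.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            struct iovec iov;
            char buf[64];
            int fds[2];

            /* A socketpair stands in for any non-regular O_NONBLOCK file. */
            socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
            fcntl(fds[0], F_SETFL, O_NONBLOCK);

            io_uring_queue_init(8, &ring, 0);
            sqe = io_uring_get_sqe(&ring);
            iov.iov_base = buf;
            iov.iov_len = sizeof(buf);
            io_uring_prep_readv(sqe, fds[0], &iov, 1, 0);
            io_uring_submit(&ring);

            /*
             * Data arrives only after submission, so the inline
             * non-blocking attempt sees an empty socket.
             */
            write(fds[1], "ping", 4);

            io_uring_wait_cqe(&ring, &cqe);
            printf("res = %d\n", cqe->res); /* -EAGAIN before the fix, 4 after */
            io_uring_cqe_seen(&ring, cqe);
            io_uring_queue_exit(&ring);
            return 0;
    }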

Cc: stable@vger.kernel.org
Reported-by: Hrvoje Zeba <zeba.hrvoje@gmail.com>
Tested-by: Hrvoje Zeba <zeba.hrvoje@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Reviewed-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent dd430760
fs/io_uring.c

@@ -322,6 +322,8 @@ struct io_kiocb {
 #define REQ_F_FAIL_LINK		256	/* fail rest of links */
 #define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
 #define REQ_F_TIMEOUT		1024	/* timeout request */
+#define REQ_F_ISREG		2048	/* regular file */
+#define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
 	u64			user_data;
 	u32			result;
 	u32			sequence;
@@ -913,26 +915,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 	return ret;
 }
 
-static void kiocb_end_write(struct kiocb *kiocb)
+static void kiocb_end_write(struct io_kiocb *req)
 {
-	if (kiocb->ki_flags & IOCB_WRITE) {
-		struct inode *inode = file_inode(kiocb->ki_filp);
+	/*
+	 * Tell lockdep we inherited freeze protection from submission
+	 * thread.
+	 */
+	if (req->flags & REQ_F_ISREG) {
+		struct inode *inode = file_inode(req->file);
 
-		/*
-		 * Tell lockdep we inherited freeze protection from submission
-		 * thread.
-		 */
-		if (S_ISREG(inode->i_mode))
-			__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
-		file_end_write(kiocb->ki_filp);
+		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
 	}
+	file_end_write(req->file);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
-	kiocb_end_write(kiocb);
+	if (kiocb->ki_flags & IOCB_WRITE)
+		kiocb_end_write(req);
 
 	if ((req->flags & REQ_F_LINK) && res != req->result)
 		req->flags |= REQ_F_FAIL_LINK;
@@ -944,7 +946,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
-	kiocb_end_write(kiocb);
+	if (kiocb->ki_flags & IOCB_WRITE)
+		kiocb_end_write(req);
 
 	if ((req->flags & REQ_F_LINK) && res != req->result)
 		req->flags |= REQ_F_FAIL_LINK;
@@ -1058,8 +1061,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 	if (!req->file)
 		return -EBADF;
 
-	if (force_nonblock && !io_file_supports_async(req->file))
-		force_nonblock = false;
+	if (S_ISREG(file_inode(req->file)->i_mode))
+		req->flags |= REQ_F_ISREG;
+
+	/*
+	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
+	 * we know to async punt it even if it was opened O_NONBLOCK
+	 */
+	if (force_nonblock && !io_file_supports_async(req->file)) {
+		req->flags |= REQ_F_MUST_PUNT;
+		return -EAGAIN;
+	}
 
 	kiocb->ki_pos = READ_ONCE(sqe->off);
 	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -1080,7 +1092,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 		return ret;
 
 	/* don't allow async punt if RWF_NOWAIT was requested */
-	if (kiocb->ki_flags & IOCB_NOWAIT)
+	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+	    (req->file->f_flags & O_NONBLOCK))
 		req->flags |= REQ_F_NOWAIT;
 
 	if (force_nonblock)
@@ -1381,7 +1394,9 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 		 * need async punt anyway, so it's more efficient to do it
 		 * here.
 		 */
-		if (force_nonblock && ret2 > 0 && ret2 < read_size)
+		if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
+		    (req->flags & REQ_F_ISREG) &&
+		    ret2 > 0 && ret2 < read_size)
 			ret2 = -EAGAIN;
 		/* Catch -EAGAIN return for forced non-blocking submission */
 		if (!force_nonblock || ret2 != -EAGAIN) {
@@ -1446,7 +1461,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	 * released so that it doesn't complain about the held lock when
 	 * we return to userspace.
 	 */
-	if (S_ISREG(file_inode(file)->i_mode)) {
+	if (req->flags & REQ_F_ISREG) {
 		__sb_start_write(file_inode(file)->i_sb,
 					SB_FREEZE_WRITE, true);
 		__sb_writers_release(file_inode(file)->i_sb,
@@ -2281,7 +2296,13 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	int ret;
 
 	ret = __io_submit_sqe(ctx, req, s, force_nonblock);
-	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+
+	/*
+	 * We async punt it if the file wasn't marked NOWAIT, or if the file
+	 * doesn't support non-blocking read/write attempts
+	 */
+	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
+	    (req->flags & REQ_F_MUST_PUNT))) {
 		struct io_uring_sqe *sqe_copy;
 
 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
......