From 2667df78a29144a9dd2f11f2070849521e10ea5e Mon Sep 17 00:00:00 2001
From: Stefan Metzmacher <metze@samba.org>
Date: Mon, 19 Apr 2021 09:27:53 +0000
Subject: [PATCH] io_uring: call req_set_fail_links() on short
 send[msg]()/recv[msg]() with MSG_WAITALL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

stable inclusion
from stable-5.10.28
commit 44c816c8b9ab3296d56cfd0a75975b1fe41186d7
bugzilla: 51779

--------------------------------

[ Upstream commit 0031275d119efe16711cd93519b595e6f9b4b330 ]

Without that it's not safe to use them in a linked combination with
others.

Now combinations like IORING_OP_SENDMSG followed by IORING_OP_SPLICE
should be possible.

We already handle short reads and writes for the following opcodes:

- IORING_OP_READV
- IORING_OP_READ_FIXED
- IORING_OP_READ
- IORING_OP_WRITEV
- IORING_OP_WRITE_FIXED
- IORING_OP_WRITE
- IORING_OP_SPLICE
- IORING_OP_TEE

Now we have it for these as well:

- IORING_OP_SENDMSG
- IORING_OP_SEND
- IORING_OP_RECVMSG
- IORING_OP_RECV

For IORING_OP_RECVMSG we also check for the MSG_TRUNC and MSG_CTRUNC
flags in order to call req_set_fail_links().

There might be applications around that depend on the behavior that
even short send[msg]()/recv[msg]() returns continue an IOSQE_IO_LINK
chain.

It's very unlikely that such applications pass in MSG_WAITALL,
which is only defined in 'man 2 recvmsg', but not in 'man 2 sendmsg'.

It's expected that the low-level sock_sendmsg() call just ignores
MSG_WAITALL, just as MSG_ZEROCOPY is ignored unless SO_ZEROCOPY has
been explicitly set.

We also expect the caller to know about the implicit truncation to
MAX_RW_COUNT, which we don't detect.

cc: netdev@vger.kernel.org
Link: https://lore.kernel.org/r/c4e1a4cc0d905314f4d5dc567e65a7b09621aab3.1615908477.git.metze@samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Acked-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 fs/io_uring.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 90a0c4152359..afc6d4f9fd9e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4395,6 +4395,7 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file, &ret);
@@ -4421,6 +4422,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 	if (force_nonblock && ret == -EAGAIN)
 		return io_setup_async_msg(req, kmsg);
@@ -4430,7 +4434,7 @@
 	if (kmsg->iov != kmsg->fast_iov)
 		kfree(kmsg->iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, ret, 0, cs);
 	return 0;
@@ -4444,6 +4448,7 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
 	struct iovec iov;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file, &ret);
@@ -4465,6 +4470,9 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	msg.msg_flags = flags;
 	ret = sock_sendmsg(sock, &msg);
 	if (force_nonblock && ret == -EAGAIN)
@@ -4472,7 +4480,7 @@
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
 
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, ret, 0, cs);
 	return 0;
@@ -4624,6 +4632,7 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 
 	sock = sock_from_file(req->file, &ret);
@@ -4659,6 +4668,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
 					kmsg->uaddr, flags);
 	if (force_nonblock && ret == -EAGAIN)
@@ -4671,7 +4683,7 @@
 	if (kmsg->iov != kmsg->fast_iov)
 		kfree(kmsg->iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, ret, cflags, cs);
 	return 0;
@@ -4687,6 +4699,7 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
 	struct socket *sock;
 	struct iovec iov;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 
 	sock = sock_from_file(req->file, &ret);
@@ -4717,6 +4730,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	ret = sock_recvmsg(sock, &msg, flags);
 	if (force_nonblock && ret == -EAGAIN)
 		return -EAGAIN;
@@ -4725,7 +4741,7 @@
 out_free:
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, ret, cflags, cs);
 	return 0;
-- 
GitLab
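
A usage sketch of the linked IORING_OP_SENDMSG + IORING_OP_SPLICE pattern
mentioned in the commit message, on a kernel with this change. It assumes
liburing as the userspace library; the names send_header_then_splice,
sockfd (a connected stream socket), pipefd (a pipe read end that already
holds payload_len bytes), hdr and hdr_len are all illustrative and not
taken from the patch.

/*
 * Sketch only: send a header with MSG_WAITALL, linked to a splice of the
 * payload from a pipe into the socket.  With this change a short send
 * fails the link, so the splice is cancelled rather than running after a
 * partially sent header.
 */
#include <liburing.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_header_then_splice(int sockfd, int pipefd,
				   void *hdr, size_t hdr_len,
				   unsigned int payload_len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov = { .iov_base = hdr, .iov_len = hdr_len };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	int i, ret;

	ret = io_uring_queue_init(4, &ring, 0);
	if (ret < 0)
		return ret;

	/* 1st SQE: send the header; MSG_WAITALL makes a short send fail the link */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_sendmsg(sqe, sockfd, &msg, MSG_WAITALL);
	sqe->flags |= IOSQE_IO_LINK;

	/* 2nd SQE: splice the payload from the pipe; runs only if the send was complete */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_splice(sqe, pipefd, -1, sockfd, -1, payload_len, 0);

	ret = io_uring_submit(&ring);
	if (ret < 0)
		goto out;

	/* Reap both CQEs; the splice completes with -ECANCELED after a short send */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		io_uring_cqe_seen(&ring, cqe);
	}
out:
	io_uring_queue_exit(&ring);
	return ret;
}

On older kernels without this change the same submission would run the
splice even after a short send; callers there have to check the sendmsg
CQE result themselves before issuing the splice.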