// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

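/*
 * Recycle the async msghdr into the ctx-wide cache once the request is
 * done with it, so the next network request can reuse the allocation.
 * Only done when the ring is locked (i.e. not IO_URING_F_UNLOCKED).
 */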
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

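/*
 * Grab an async msghdr, preferably a cached one when the ring is locked;
 * otherwise fall back to a plain async data allocation.
 */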
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		hdr = container_of(entry, struct io_async_msghdr, cache);
		hdr->free_iov = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

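/*
 * Punt a send/recv to async context: move the on-stack msghdr into
 * persistent async data, repointing msg_name and the fast_iov iterator
 * at the copy, and return -EAGAIN so the request is retried from there.
 */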
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

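/*
 * For send variants that carry a destination address, copy the sockaddr
 * to kernel space at prep time so the request can later be issued from
 * async context.
 */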
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			      struct sockaddr_storage *addr_storage,
			      unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

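/*
 * Check whether the header space for multishot recvmsg (recvmsg_out
 * plus name and control lengths) would overflow an int.
 */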
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

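/*
 * Copy the user msghdr and set up the kernel-side iterator. With
 * provided buffers (REQ_F_BUFFER_SELECT), at most one iovec is allowed
 * since the actual buffer is only picked at issue time.
 */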
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

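/*
 * Multishot recvmsg lays the selected buffer out as io_uring_recvmsg_out,
 * then name, then control, then payload. Carve the header off the front
 * and leave *buf/*len describing just the payload area.
 */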
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *      "fromlen shall refer to the value before truncation.."
	 *                      1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

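/*
 * Issue side of IORING_OP_RECVMSG. In multishot mode, each completed
 * receive posts a CQE and loops back to retry_multishot to pick a
 * fresh provided buffer.
 */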
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

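/*
 * Cleanup for zerocopy sends: free any copied iovec and flush the
 * notification so its CQE is still posted if the request goes away.
 */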
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

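/*
 * Prep for zerocopy sends: allocate the notification request that will
 * carry the IORING_CQE_F_NOTIF completion, resolve a fixed buffer if
 * one was requested, and stash the destination address for the non-msg
 * variant.
 */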
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

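/*
 * Splice bvec segments from the iterator straight into skb frags
 * without copying, falling back to the generic zerocopy import if the
 * skb cannot take managed frag references.
 */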
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

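/*
 * Issue side of IORING_OP_SEND_ZC: import the fixed buffer or user
 * address range, point msg_ubuf at the notification's ubuf_info, and
 * complete with IORING_CQE_F_MORE to announce the later notif CQE.
 */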
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

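/*
 * Common failure path for the send/recv variants: surface partial
 * progress in the CQE, and keep the F_MORE promise for zc requests
 * that still owe a notification CQE.
 */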
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len =  READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

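	/*
	 * An earlier attempt already returned -EINPROGRESS; just check the
	 * socket error state for the final connect result.
	 */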
	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif