// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
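
/*
 * A minimal sketch (not code from this file; the pointer names below are
 * hypothetical, derived from the io_sqring_offsets published by
 * io_uring_setup) of the application-side submission sequence that the
 * ordering rules above describe; liburing implements the same idea:
 *
 *	unsigned mask = *sq_ring_mask;
 *	unsigned tail = *sq_tail;			// application owns the SQ tail
 *	sqes[tail & mask] = ...;			// fill the SQE
 *	sq_array[tail & mask] = tail & mask;		// publish its index
 *	smp_store_release(sq_tail, tail + 1);		// order SQE stores before the tail store
 *	if (setup_flags & IORING_SETUP_SQPOLL) {
 *		smp_mb();				// full barrier before the wakeup check
 *		if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *			io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 *	}
 */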
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
#include <linux/pagemap.h>
#include <linux/io_uring.h>
#include <linux/blk-cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"
#include "io-wq.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

/*
 * Shift of 9 is 512 entries, or exactly one page of 8-byte file
 * pointers (512 * 8 == 4096 bytes) on 64-bit archs
 */
#define IORING_FILE_TABLE_SHIFT	9
#define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
#define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
#define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
				IOSQE_BUFFER_SELECT)

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32                     cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
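
/*
 * Illustrative userspace counterpart (a sketch, not code from this file):
 * after io_uring_setup() fills struct io_uring_params, the application
 * typically maps this structure and derives pointers from the published
 * offsets, roughly:
 *
 *	void *sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq_ptr + p.sq_off.tail;
 *	unsigned *sq_array = sq_ptr + p.sq_off.array;
 *
 * and similarly with IORING_OFF_CQ_RING for the CQ ring and IORING_OFF_SQES
 * for the io_uring_sqe array; error handling is omitted.
 */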

enum io_uring_cmd_flags {
	IO_URING_F_NONBLOCK		= 1,
	IO_URING_F_COMPLETE_DEFER	= 2,
};

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct		bio_vec *bvec;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
};

struct io_ring_ctx;

struct io_rsrc_put {
	struct list_head list;
	union {
		void *rsrc;
		struct file *file;
	};
};

struct fixed_rsrc_table {
	struct file		**files;
};

struct fixed_rsrc_ref_node {
	struct percpu_ref		refs;
	struct list_head		node;
	struct list_head		rsrc_list;
	struct fixed_rsrc_data		*rsrc_data;
	void				(*rsrc_put)(struct io_ring_ctx *ctx,
						    struct io_rsrc_put *prsrc);
	struct llist_node		llist;
	bool				done;
};

struct fixed_rsrc_data {
	struct fixed_rsrc_table		*table;
	struct io_ring_ctx		*ctx;

	struct fixed_rsrc_ref_node	*node;
	struct percpu_ref		refs;
	struct completion		done;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__s32 len;
	__u16 bid;
};

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_sq_data {
	refcount_t		refs;
	struct mutex		lock;

	/* ctx's that are using this sqd */
	struct list_head	ctx_list;
	struct list_head	ctx_new_list;
	struct mutex		ctx_lock;

	struct task_struct	*thread;
	struct wait_queue_head	wait;

	unsigned		sq_thread_idle;
};

#define IO_IOPOLL_BATCH			8
#define IO_COMPL_BATCH			32
#define IO_REQ_CACHE_SIZE		32
#define IO_REQ_ALLOC_BATCH		8

struct io_comp_state {
	struct io_kiocb		*reqs[IO_COMPL_BATCH];
	unsigned int		nr;
	unsigned int		locked_free_nr;
	/* inline/task_work completion list, under ->uring_lock */
	struct list_head	free_list;
	/* IRQ completion list, under ->completion_lock */
	struct list_head	locked_free_list;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	struct blk_plug		plug;
	struct io_submit_link	link;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_REQ_CACHE_SIZE];
	unsigned int		free_reqs;

	bool			plug_started;

	/*
	 * Batch completion logic
	 */
	struct io_comp_state	comp;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		file_refs;
	unsigned int		ios_left;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		unsigned int		compat: 1;
		unsigned int		limit_mem: 1;
		unsigned int		cq_overflow_flushed: 1;
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
		unsigned int		sqo_dead: 1;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		unsigned		cached_cq_overflow;
		unsigned long		sq_check_overflow;

		struct list_head	defer_list;
		struct list_head	timeout_list;
		struct list_head	cq_overflow_list;

		struct io_uring_sqe	*sq_sqes;
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct io_submit_state		submit_state;

	struct io_rings	*rings;

	/* IO offload */
	struct io_wq		*io_wq;

	/*
	 * For SQPOLL usage - we hold a reference to the parent task, so we
	 * have access to the ->files
	 */
	struct task_struct	*sqo_task;

	/* Only used for accounting purposes */
	struct mm_struct	*mm_account;

#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state	*sqo_blkcg_css;
#endif

	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct fixed_rsrc_data	*file_data;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

#ifdef CONFIG_AUDIT
	kuid_t			loginuid;
	unsigned int		sessionid;
#endif

	struct completion	ref_comp;
	struct completion	sq_thread_comp;

#if defined(CONFIG_UNIX)
	struct socket		*ring_sock;
#endif

	struct idr		io_buffer_idr;

	struct idr		personality_idr;

	struct {
		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		unsigned		cq_mask;
		atomic_t		cq_timeouts;
		unsigned		cq_last_tm_flush;
		unsigned long		cq_check_overflow;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	iopoll_list;
		struct hlist_head	*cancel_hash;
		unsigned		cancel_hash_bits;
		bool			poll_multi_file;

		spinlock_t		inflight_lock;
		struct list_head	inflight_list;
	} ____cacheline_aligned_in_smp;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct io_restriction		restrictions;

	/* Keep this last, we don't need it for the fast path */
	struct work_struct		exit_work;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

475 476 477 478 479
struct io_poll_remove {
	struct file			*file;
	u64				addr;
};

480 481 482 483 484
struct io_close {
	struct file			*file;
	int				fd;
};

485 486 487 488 489 490 491
struct io_timeout_data {
	struct io_kiocb			*req;
	struct hrtimer			timer;
	struct timespec64		ts;
	enum hrtimer_mode		mode;
};

492 493 494 495 496
struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
497
	unsigned long			nofile;
498 499 500 501 502 503 504
};

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
505
	int				mode;
506 507
};

508 509 510 511 512
struct io_cancel {
	struct file			*file;
	u64				addr;
};

513 514
struct io_timeout {
	struct file			*file;
515 516
	u32				off;
	u32				target_seq;
P
Pavel Begunkov 已提交
517
	struct list_head		list;
518 519
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
520 521
};

522 523 524
struct io_timeout_rem {
	struct file			*file;
	u64				addr;
P
Pavel Begunkov 已提交
525 526 527 528

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
529 530
};

531 532 533 534 535 536 537
struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u64				len;
};

538 539 540 541 542 543
struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

544 545
struct io_sr_msg {
	struct file			*file;
546
	union {
547
		struct user_msghdr __user *umsg;
548 549
		void __user		*buf;
	};
550
	int				msg_flags;
551
	int				bgid;
552
	size_t				len;
553
	struct io_buffer		*kbuf;
554 555
};

556 557 558 559
struct io_open {
	struct file			*file;
	int				dfd;
	struct filename			*filename;
560
	struct open_how			how;
561
	unsigned long			nofile;
562 563
};

564
struct io_rsrc_update {
565 566 567 568 569 570
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

J
Jens Axboe 已提交
571 572 573 574 575 576 577
struct io_fadvise {
	struct file			*file;
	u64				offset;
	u32				len;
	u32				advice;
};

J
Jens Axboe 已提交
578 579 580 581 582 583 584
struct io_madvise {
	struct file			*file;
	u64				addr;
	u32				len;
	u32				advice;
};

585 586 587 588 589 590
struct io_epoll {
	struct file			*file;
	int				epfd;
	int				op;
	int				fd;
	struct epoll_event		event;
591 592
};

P
Pavel Begunkov 已提交
593 594 595 596 597 598 599 600 601
struct io_splice {
	struct file			*file_out;
	struct file			*file_in;
	loff_t				off_out;
	loff_t				off_in;
	u64				len;
	unsigned int			flags;
};

602 603 604 605 606 607 608 609 610
struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__s32				len;
	__u32				bgid;
	__u16				nbufs;
	__u16				bid;
};

611 612 613 614 615
struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
B
Bijan Mottahedeh 已提交
616
	const char __user		*filename;
617 618 619
	struct statx __user		*buffer;
};

J
Jens Axboe 已提交
620 621 622 623 624
struct io_shutdown {
	struct file			*file;
	int				how;
};

625 626 627 628 629 630 631 632 633
struct io_rename {
	struct file			*file;
	int				old_dfd;
	int				new_dfd;
	struct filename			*oldpath;
	struct filename			*newpath;
	int				flags;
};

634 635 636 637 638 639 640
struct io_unlink {
	struct file			*file;
	int				dfd;
	int				flags;
	struct filename			*filename;
};

641 642 643
struct io_completion {
	struct file			*file;
	struct list_head		list;
644
	int				cflags;
645 646
};

647 648 649 650
struct io_async_connect {
	struct sockaddr_storage		address;
};

651 652
struct io_async_msghdr {
	struct iovec			fast_iov[UIO_FASTIOV];
653 654
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
655 656
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
657
	struct sockaddr_storage		addr;
658 659
};

660 661
struct io_async_rw {
	struct iovec			fast_iov[UIO_FASTIOV];
662 663
	const struct iovec		*free_iovec;
	struct iov_iter			iter;
664
	size_t				bytes_done;
665
	struct wait_page_queue		wpq;
666 667
};

668 669 670 671 672 673
enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
674
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
675 676 677 678 679 680 681

	REQ_F_FAIL_LINK_BIT,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_ISREG_BIT,
P
Pavel Begunkov 已提交
682
	REQ_F_NEED_CLEANUP_BIT,
683
	REQ_F_POLLED_BIT,
684
	REQ_F_BUFFER_SELECTED_BIT,
685
	REQ_F_NO_FILE_TABLE_BIT,
686
	REQ_F_WORK_INITIALIZED_BIT,
687
	REQ_F_LTIMEOUT_ACTIVE_BIT,
688
	REQ_F_COMPLETE_INLINE_BIT,
689 690 691

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
692 693 694 695 696 697 698 699 700 701 702 703 704
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
705 706
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
707 708 709 710 711 712 713 714 715

	/* fail rest of links */
	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
	/* on inflight list */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
716
	/* has or had linked timeout */
717 718 719
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
P
Pavel Begunkov 已提交
720 721
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
722 723
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
724 725
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
726 727
	/* doesn't need file table for this request */
	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
728 729
	/* io_wq_work is initialized */
	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
730 731
	/* linked timeout is active, i.e. prepared by link's head */
	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
732 733
	/* completion is deferred through io_comp_state */
	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
734 735 736 737
};

struct async_poll {
	struct io_poll_iocb	poll;
738
	struct io_poll_iocb	*double_poll;
739 740
};

741 742 743 744 745
struct io_task_work {
	struct io_wq_work_node	node;
	task_work_func_t	func;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct io_rw		rw;
		struct io_poll_iocb	poll;
		struct io_poll_remove	poll_remove;
		struct io_accept	accept;
		struct io_sync		sync;
		struct io_cancel	cancel;
		struct io_timeout	timeout;
		struct io_timeout_rem	timeout_rem;
		struct io_connect	connect;
		struct io_sr_msg	sr_msg;
		struct io_open		open;
		struct io_close		close;
		struct io_rsrc_update	rsrc_update;
		struct io_fadvise	fadvise;
		struct io_madvise	madvise;
		struct io_epoll		epoll;
		struct io_splice	splice;
		struct io_provide_buf	pbuf;
		struct io_statx		statx;
		struct io_shutdown	shutdown;
		struct io_rename	rename;
		struct io_unlink	unlink;
		/* use only after cleaning per-op data, see io_clean_op() */
		struct io_completion	compl;
	};

	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;

	u16				buf_index;
	u32				result;

	struct io_ring_ctx		*ctx;
	unsigned int			flags;
	refcount_t			refs;
	struct task_struct		*task;
	u64				user_data;

	struct io_kiocb			*link;
	struct percpu_ref		*fixed_rsrc_refs;

	/*
	 * 1. used with ctx->iopoll_list with reads/writes
	 * 2. to track reqs with ->files (see io_op_def::file_table)
	 */
	struct list_head		inflight_entry;
	union {
		struct io_task_work	io_task_work;
		struct callback_head	task_work;
	};
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	struct hlist_node		hash_node;
	struct async_poll		*apoll;
	struct io_wq_work		work;
};

814 815 816
struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
817
	u32			seq;
J
Jens Axboe 已提交
818 819
};

820 821 822 823 824 825 826
struct io_op_def {
	/* needs req->file assigned */
	unsigned		needs_file : 1;
	/* hash wq insertion if file is a regular file */
	unsigned		hash_reg_file : 1;
	/* unbound wq insertion if file is a non-regular file */
	unsigned		unbound_nonreg_file : 1;
827 828
	/* opcode is not supported by this kernel */
	unsigned		not_supported : 1;
829 830 831
	/* set if opcode supports polled "wait" */
	unsigned		pollin : 1;
	unsigned		pollout : 1;
832 833
	/* op supports buffer selection */
	unsigned		buffer_select : 1;
834 835
	/* must always have async data allocated */
	unsigned		needs_async_data : 1;
J
Jens Axboe 已提交
836 837
	/* should block plug */
	unsigned		plug : 1;
838 839
	/* size of async data needed, if any */
	unsigned short		async_size;
840
	unsigned		work_flags;
841 842
};

843
static const struct io_op_def io_op_defs[] = {
844 845
	[IORING_OP_NOP] = {},
	[IORING_OP_READV] = {
846 847
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
848
		.pollin			= 1,
849
		.buffer_select		= 1,
850
		.needs_async_data	= 1,
J
Jens Axboe 已提交
851
		.plug			= 1,
852
		.async_size		= sizeof(struct io_async_rw),
853
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
854
	},
855
	[IORING_OP_WRITEV] = {
856 857 858
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
859
		.pollout		= 1,
860
		.needs_async_data	= 1,
J
Jens Axboe 已提交
861
		.plug			= 1,
862
		.async_size		= sizeof(struct io_async_rw),
863 864
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
865
	},
866
	[IORING_OP_FSYNC] = {
867
		.needs_file		= 1,
868
		.work_flags		= IO_WQ_WORK_BLKCG,
869
	},
870
	[IORING_OP_READ_FIXED] = {
871 872
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
873
		.pollin			= 1,
J
Jens Axboe 已提交
874
		.plug			= 1,
875
		.async_size		= sizeof(struct io_async_rw),
876
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
877
	},
878
	[IORING_OP_WRITE_FIXED] = {
879 880 881
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
882
		.pollout		= 1,
J
Jens Axboe 已提交
883
		.plug			= 1,
884
		.async_size		= sizeof(struct io_async_rw),
885 886
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
						IO_WQ_WORK_MM,
887
	},
888
	[IORING_OP_POLL_ADD] = {
889 890 891
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
	},
892 893
	[IORING_OP_POLL_REMOVE] = {},
	[IORING_OP_SYNC_FILE_RANGE] = {
894
		.needs_file		= 1,
895
		.work_flags		= IO_WQ_WORK_BLKCG,
896
	},
897
	[IORING_OP_SENDMSG] = {
898 899
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
900
		.pollout		= 1,
901 902
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
903
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
904
	},
905
	[IORING_OP_RECVMSG] = {
906 907
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
908
		.pollin			= 1,
909
		.buffer_select		= 1,
910 911
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_msghdr),
912
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
913
	},
914
	[IORING_OP_TIMEOUT] = {
915 916
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
917
		.work_flags		= IO_WQ_WORK_MM,
918
	},
P
Pavel Begunkov 已提交
919 920 921 922
	[IORING_OP_TIMEOUT_REMOVE] = {
		/* used by timeout updates' prep() */
		.work_flags		= IO_WQ_WORK_MM,
	},
923
	[IORING_OP_ACCEPT] = {
924 925
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
926
		.pollin			= 1,
927
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
928
	},
929 930
	[IORING_OP_ASYNC_CANCEL] = {},
	[IORING_OP_LINK_TIMEOUT] = {
931 932
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_timeout_data),
933
		.work_flags		= IO_WQ_WORK_MM,
934
	},
935
	[IORING_OP_CONNECT] = {
936 937
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
938
		.pollout		= 1,
939 940
		.needs_async_data	= 1,
		.async_size		= sizeof(struct io_async_connect),
941
		.work_flags		= IO_WQ_WORK_MM,
942
	},
943
	[IORING_OP_FALLOCATE] = {
944
		.needs_file		= 1,
945
		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
946
	},
947
	[IORING_OP_OPENAT] = {
948
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
949
						IO_WQ_WORK_FS | IO_WQ_WORK_MM,
950
	},
951
	[IORING_OP_CLOSE] = {
952
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
953
	},
954
	[IORING_OP_FILES_UPDATE] = {
955
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
956
	},
957
	[IORING_OP_STATX] = {
958 959
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
960
	},
961
	[IORING_OP_READ] = {
962 963
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
964
		.pollin			= 1,
965
		.buffer_select		= 1,
J
Jens Axboe 已提交
966
		.plug			= 1,
967
		.async_size		= sizeof(struct io_async_rw),
968
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
969
	},
970
	[IORING_OP_WRITE] = {
971 972
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
973
		.pollout		= 1,
J
Jens Axboe 已提交
974
		.plug			= 1,
975
		.async_size		= sizeof(struct io_async_rw),
976 977
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
						IO_WQ_WORK_FSIZE,
978
	},
979
	[IORING_OP_FADVISE] = {
J
Jens Axboe 已提交
980
		.needs_file		= 1,
981
		.work_flags		= IO_WQ_WORK_BLKCG,
J
Jens Axboe 已提交
982
	},
983
	[IORING_OP_MADVISE] = {
984
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
J
Jens Axboe 已提交
985
	},
986
	[IORING_OP_SEND] = {
987 988
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
989
		.pollout		= 1,
990
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
991
	},
992
	[IORING_OP_RECV] = {
993 994
		.needs_file		= 1,
		.unbound_nonreg_file	= 1,
995
		.pollin			= 1,
996
		.buffer_select		= 1,
997
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
998
	},
999
	[IORING_OP_OPENAT2] = {
1000
		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
1001
						IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
1002
	},
1003 1004
	[IORING_OP_EPOLL_CTL] = {
		.unbound_nonreg_file	= 1,
1005
		.work_flags		= IO_WQ_WORK_FILES,
1006
	},
P
Pavel Begunkov 已提交
1007 1008 1009 1010
	[IORING_OP_SPLICE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
1011
		.work_flags		= IO_WQ_WORK_BLKCG,
1012 1013
	},
	[IORING_OP_PROVIDE_BUFFERS] = {},
1014
	[IORING_OP_REMOVE_BUFFERS] = {},
P
Pavel Begunkov 已提交
1015 1016 1017 1018 1019
	[IORING_OP_TEE] = {
		.needs_file		= 1,
		.hash_reg_file		= 1,
		.unbound_nonreg_file	= 1,
	},
J
Jens Axboe 已提交
1020 1021 1022
	[IORING_OP_SHUTDOWN] = {
		.needs_file		= 1,
	},
1023 1024 1025 1026
	[IORING_OP_RENAMEAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
1027 1028 1029 1030
	[IORING_OP_UNLINKAT] = {
		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES |
						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
	},
1031 1032
};

static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files);
static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx);
static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
				     struct fixed_rsrc_ref_node *ref_node);

static bool io_rw_reissue(struct io_kiocb *req);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_double_put_req(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
static void io_put_task(struct task_struct *task, int nr);
static void io_queue_next(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update *ip,
				 unsigned nr_args);
static void __io_clean_op(struct io_kiocb *req);
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock);
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force);
static void io_req_task_queue(struct io_kiocb *req);
static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx);

static struct kmem_cache *req_cachep;

1073
static const struct file_operations io_uring_fops;
J
Jens Axboe 已提交
1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (file->f_op == &io_uring_fops) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

1088 1089 1090
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

1091 1092
static inline void io_clean_op(struct io_kiocb *req)
{
1093
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
1094 1095 1096
		__io_clean_op(req);
}

1097 1098 1099 1100
static inline void io_set_resource_node(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

1101 1102 1103
	if (!req->fixed_rsrc_refs) {
		req->fixed_rsrc_refs = &ctx->file_data->node->refs;
		percpu_ref_get(req->fixed_rsrc_refs);
1104 1105 1106
	}
}

1107 1108 1109 1110 1111 1112
static bool io_match_task(struct io_kiocb *head,
			  struct task_struct *task,
			  struct files_struct *files)
{
	struct io_kiocb *req;

1113 1114 1115 1116
	if (task && head->task != task) {
		/* in terms of cancelation, always match if req task is dead */
		if (head->task->flags & PF_EXITING)
			return true;
1117
		return false;
1118
	}
1119 1120 1121 1122
	if (!files)
		return true;

	io_for_each_link(req, head) {
1123 1124 1125 1126 1127
		if (!(req->flags & REQ_F_WORK_INITIALIZED))
			continue;
		if (req->file && req->file->f_op == &io_uring_fops)
			return true;
		if ((req->work.flags & IO_WQ_WORK_FILES) &&
1128 1129 1130 1131 1132 1133
		    req->work.identity->files == files)
			return true;
	}
	return false;
}

1134
static void io_sq_thread_drop_mm_files(void)
1135
{
1136
	struct files_struct *files = current->files;
1137 1138 1139 1140 1141
	struct mm_struct *mm = current->mm;

	if (mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
1142
		current->mm = NULL;
1143
	}
1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
	if (files) {
		struct nsproxy *nsproxy = current->nsproxy;

		task_lock(current);
		current->files = NULL;
		current->nsproxy = NULL;
		task_unlock(current);
		put_files_struct(files);
		put_nsproxy(nsproxy);
	}
}

1156
static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
1157 1158 1159 1160 1161 1162 1163 1164 1165
{
	if (!current->files) {
		struct files_struct *files;
		struct nsproxy *nsproxy;

		task_lock(ctx->sqo_task);
		files = ctx->sqo_task->files;
		if (!files) {
			task_unlock(ctx->sqo_task);
1166
			return -EOWNERDEAD;
1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
		}
		atomic_inc(&files->count);
		get_nsproxy(ctx->sqo_task->nsproxy);
		nsproxy = ctx->sqo_task->nsproxy;
		task_unlock(ctx->sqo_task);

		task_lock(current);
		current->files = files;
		current->nsproxy = nsproxy;
		task_unlock(current);
	}
1178
	return 0;
1179 1180 1181 1182
}

static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
{
1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196
	struct mm_struct *mm;

	if (current->mm)
		return 0;

	task_lock(ctx->sqo_task);
	mm = ctx->sqo_task->mm;
	if (unlikely(!mm || !mmget_not_zero(mm)))
		mm = NULL;
	task_unlock(ctx->sqo_task);

	if (mm) {
		kthread_use_mm(mm);
		return 0;
1197 1198
	}

1199
	return -EFAULT;
1200 1201
}

1202 1203
static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
					   struct io_kiocb *req)
1204
{
1205
	const struct io_op_def *def = &io_op_defs[req->opcode];
1206
	int ret;
1207 1208

	if (def->work_flags & IO_WQ_WORK_MM) {
1209
		ret = __io_sq_thread_acquire_mm(ctx);
1210 1211 1212 1213
		if (unlikely(ret))
			return ret;
	}

1214 1215 1216 1217 1218
	if (def->needs_file || (def->work_flags & IO_WQ_WORK_FILES)) {
		ret = __io_sq_thread_acquire_files(ctx);
		if (unlikely(ret))
			return ret;
	}
1219 1220

	return 0;
1221 1222
}

1223 1224 1225 1226 1227 1228 1229 1230
static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
						struct io_kiocb *req)
{
	if (!(ctx->flags & IORING_SETUP_SQPOLL))
		return 0;
	return __io_sq_thread_acquire_mm_files(ctx, req);
}

1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
					 struct cgroup_subsys_state **cur_css)

{
#ifdef CONFIG_BLK_CGROUP
	/* puts the old one when swapping */
	if (*cur_css != ctx->sqo_blkcg_css) {
		kthread_associate_blkcg(ctx->sqo_blkcg_css);
		*cur_css = ctx->sqo_blkcg_css;
	}
#endif
}

static void io_sq_thread_unassociate_blkcg(void)
{
#ifdef CONFIG_BLK_CGROUP
	kthread_associate_blkcg(NULL);
#endif
}

1251 1252 1253 1254 1255
static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}
1256

J
Jens Axboe 已提交
1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
/*
 * None of these are dereferenced, they are simply used to check if any of
 * them have changed. If we're under current and check they are still the
 * same, we're fine to grab references to them for actual out-of-line use.
 */
static void io_init_identity(struct io_identity *id)
{
	id->files = current->files;
	id->mm = current->mm;
#ifdef CONFIG_BLK_CGROUP
	rcu_read_lock();
	id->blkcg_css = blkcg_css();
	rcu_read_unlock();
#endif
	id->creds = current_cred();
	id->nsproxy = current->nsproxy;
	id->fs = current->fs;
	id->fsize = rlimit(RLIMIT_FSIZE);
1275 1276 1277 1278
#ifdef CONFIG_AUDIT
	id->loginuid = current->loginuid;
	id->sessionid = current->sessionid;
#endif
J
Jens Axboe 已提交
1279 1280 1281
	refcount_set(&id->count, 1);
}

1282 1283 1284 1285 1286 1287
static inline void __io_req_init_async(struct io_kiocb *req)
{
	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}

1288 1289 1290 1291 1292 1293
/*
 * Note: must call io_req_init_async() before the first time you
 * touch any members of io_wq_work.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
1294 1295
	struct io_uring_task *tctx = current->io_uring;

1296 1297 1298
	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

1299
	__io_req_init_async(req);
1300 1301 1302 1303 1304

	/* Grab a ref if this isn't our static identity */
	req->work.identity = tctx->identity;
	if (tctx->identity != &tctx->__identity)
		refcount_inc(&req->work.identity->count);
1305 1306
}

J
Jens Axboe 已提交
1307 1308 1309 1310
static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

1311
	complete(&ctx->ref_comp);
J
Jens Axboe 已提交
1312 1313
}

1314 1315 1316 1317 1318
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	return !req->timeout.off;
}

J
Jens Axboe 已提交
1319 1320 1321
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
1322
	int hash_bits;
J
Jens Axboe 已提交
1323 1324 1325 1326 1327

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342
	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread.
	 */
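	/*
	 * e.g. (hypothetical numbers): cq_entries == 4096 gives ilog2() == 12,
	 * so hash_bits == 7, i.e. 128 buckets and 4096 / 128 == 32 per list.
	 */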
	hash_bits = ilog2(p->cq_entries);
	hash_bits -= 5;
	if (hash_bits <= 0)
		hash_bits = 1;
	ctx->cancel_hash_bits = hash_bits;
	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
					GFP_KERNEL);
	if (!ctx->cancel_hash)
		goto err;
	__hash_init(ctx->cancel_hash, 1U << hash_bits);

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	init_waitqueue_head(&ctx->cq_wait);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	init_completion(&ctx->ref_comp);
	init_completion(&ctx->sq_thread_comp);
	idr_init(&ctx->io_buffer_idr);
	idr_init(&ctx->personality_idr);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	spin_lock_init(&ctx->inflight_lock);
	INIT_LIST_HEAD(&ctx->inflight_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
	INIT_LIST_HEAD(&ctx->submit_state.comp.locked_free_list);
	return ctx;
err:
	kfree(ctx->cancel_hash);
	kfree(ctx);
	return NULL;
}

static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq != ctx->cached_cq_tail
				+ READ_ONCE(ctx->cached_cq_overflow);
	}

	return false;
}

1389
static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
J
Jens Axboe 已提交
1390
{
1391
	if (req->work.identity == &tctx->__identity)
J
Jens Axboe 已提交
1392 1393 1394 1395 1396
		return;
	if (refcount_dec_and_test(&req->work.identity->count))
		kfree(req->work.identity);
}

1397
static void io_req_clean_work(struct io_kiocb *req)
1398
{
1399
	if (!(req->flags & REQ_F_WORK_INITIALIZED))
1400
		return;
1401

1402
	if (req->work.flags & IO_WQ_WORK_MM)
1403
		mmdrop(req->work.identity->mm);
1404
#ifdef CONFIG_BLK_CGROUP
1405
	if (req->work.flags & IO_WQ_WORK_BLKCG)
1406
		css_put(req->work.identity->blkcg_css);
1407
#endif
1408
	if (req->work.flags & IO_WQ_WORK_CREDS)
1409
		put_cred(req->work.identity->creds);
1410
	if (req->work.flags & IO_WQ_WORK_FS) {
1411
		struct fs_struct *fs = req->work.identity->fs;
1412

1413
		spin_lock(&req->work.identity->fs->lock);
1414 1415
		if (--fs->users)
			fs = NULL;
1416
		spin_unlock(&req->work.identity->fs->lock);
1417 1418 1419
		if (fs)
			free_fs_struct(fs);
	}
1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435
	if (req->work.flags & IO_WQ_WORK_FILES) {
		put_files_struct(req->work.identity->files);
		put_nsproxy(req->work.identity->nsproxy);
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_uring_task *tctx = req->task->io_uring;
		unsigned long flags;

		spin_lock_irqsave(&ctx->inflight_lock, flags);
		list_del(&req->inflight_entry);
		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
		req->flags &= ~REQ_F_INFLIGHT;
		if (atomic_read(&tctx->in_idle))
			wake_up(&tctx->wait);
	}
1436

1437 1438 1439
	req->flags &= ~REQ_F_WORK_INITIALIZED;
	req->work.flags &= ~(IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG | IO_WQ_WORK_FS |
			     IO_WQ_WORK_CREDS | IO_WQ_WORK_FILES);
1440
	io_put_identity(req->task->io_uring, req);
1441 1442
}

J
Jens Axboe 已提交
1443 1444 1445 1446 1447 1448
/*
 * Create a private copy of io_identity, since some fields don't match
 * the current context.
 */
static bool io_identity_cow(struct io_kiocb *req)
{
1449
	struct io_uring_task *tctx = current->io_uring;
J
Jens Axboe 已提交
1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469
	const struct cred *creds = NULL;
	struct io_identity *id;

	if (req->work.flags & IO_WQ_WORK_CREDS)
		creds = req->work.identity->creds;

	id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
	if (unlikely(!id)) {
		req->work.flags |= IO_WQ_WORK_CANCEL;
		return false;
	}

	/*
	 * We can safely just re-init the creds we copied. Either the field
	 * matches the current one, or we haven't grabbed it yet. The only
	 * exception is ->creds, through registered personalities, so handle
	 * that one separately.
	 */
	io_init_identity(id);
	if (creds)
1470
		id->creds = creds;
J
Jens Axboe 已提交
1471 1472 1473 1474

	/* add one for this request */
	refcount_inc(&id->count);

1475 1476 1477 1478 1479 1480
	/* drop tctx and req identity references, if needed */
	if (tctx->identity != &tctx->__identity &&
	    refcount_dec_and_test(&tctx->identity->count))
		kfree(tctx->identity);
	if (req->work.identity != &tctx->__identity &&
	    refcount_dec_and_test(&req->work.identity->count))
J
Jens Axboe 已提交
1481 1482 1483
		kfree(req->work.identity);

	req->work.identity = id;
1484
	tctx->identity = id;
J
Jens Axboe 已提交
1485 1486 1487
	return true;
}

1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
static void io_req_track_inflight(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_INFLIGHT)) {
		io_req_init_async(req);
		req->flags |= REQ_F_INFLIGHT;

		spin_lock_irq(&ctx->inflight_lock);
		list_add(&req->inflight_entry, &ctx->inflight_list);
		spin_unlock_irq(&ctx->inflight_lock);
	}
}

J
Jens Axboe 已提交
1502
static bool io_grab_identity(struct io_kiocb *req)
1503
{
1504
	const struct io_op_def *def = &io_op_defs[req->opcode];
1505
	struct io_identity *id = req->work.identity;
1506

1507 1508 1509 1510 1511
	if (def->work_flags & IO_WQ_WORK_FSIZE) {
		if (id->fsize != rlimit(RLIMIT_FSIZE))
			return false;
		req->work.flags |= IO_WQ_WORK_FSIZE;
	}
1512
#ifdef CONFIG_BLK_CGROUP
1513 1514
	if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
	    (def->work_flags & IO_WQ_WORK_BLKCG)) {
1515
		rcu_read_lock();
J
Jens Axboe 已提交
1516 1517 1518 1519
		if (id->blkcg_css != blkcg_css()) {
			rcu_read_unlock();
			return false;
		}
1520 1521 1522 1523
		/*
		 * This should be rare, either the cgroup is dying or the task
		 * is moving cgroups. Just punt to root for the handful of ios.
		 */
J
Jens Axboe 已提交
1524
		if (css_tryget_online(id->blkcg_css))
1525
			req->work.flags |= IO_WQ_WORK_BLKCG;
1526 1527 1528
		rcu_read_unlock();
	}
#endif
1529
	if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
J
Jens Axboe 已提交
1530 1531 1532
		if (id->creds != current_cred())
			return false;
		get_cred(id->creds);
1533 1534
		req->work.flags |= IO_WQ_WORK_CREDS;
	}
1535 1536 1537 1538 1539
#ifdef CONFIG_AUDIT
	if (!uid_eq(current->loginuid, id->loginuid) ||
	    current->sessionid != id->sessionid)
		return false;
#endif
1540 1541
	if (!(req->work.flags & IO_WQ_WORK_FS) &&
	    (def->work_flags & IO_WQ_WORK_FS)) {
J
Jens Axboe 已提交
1542 1543 1544 1545 1546
		if (current->fs != id->fs)
			return false;
		spin_lock(&id->fs->lock);
		if (!id->fs->in_exec) {
			id->fs->users++;
1547
			req->work.flags |= IO_WQ_WORK_FS;
1548 1549 1550 1551 1552
		} else {
			req->work.flags |= IO_WQ_WORK_CANCEL;
		}
		spin_unlock(&current->fs->lock);
	}
1553 1554 1555 1556 1557 1558 1559 1560 1561
	if (!(req->work.flags & IO_WQ_WORK_FILES) &&
	    (def->work_flags & IO_WQ_WORK_FILES) &&
	    !(req->flags & REQ_F_NO_FILE_TABLE)) {
		if (id->files != current->files ||
		    id->nsproxy != current->nsproxy)
			return false;
		atomic_inc(&id->files->count);
		get_nsproxy(id->nsproxy);
		req->work.flags |= IO_WQ_WORK_FILES;
1562
		io_req_track_inflight(req);
1563
	}
1564 1565 1566 1567 1568 1569 1570
	if (!(req->work.flags & IO_WQ_WORK_MM) &&
	    (def->work_flags & IO_WQ_WORK_MM)) {
		if (id->mm != current->mm)
			return false;
		mmgrab(id->mm);
		req->work.flags |= IO_WQ_WORK_MM;
	}
J
Jens Axboe 已提交
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581

	return true;
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	io_req_init_async(req);

1582 1583 1584
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

J
Jens Axboe 已提交
1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	/* if we fail grabbing identity, we must COW, regrab, and retry */
	if (io_grab_identity(req))
		return;

	if (!io_identity_cow(req))
		return;

	/* can't fail at this point */
	if (!io_grab_identity(req))
		WARN_ON(1);
1603
}
1604

1605
static void io_prep_async_link(struct io_kiocb *req)
1606
{
1607
	struct io_kiocb *cur;
1608

1609 1610
	io_for_each_link(cur, req)
		io_prep_async_work(cur);
1611 1612
}

1613
static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
1614
{
1615
	struct io_ring_ctx *ctx = req->ctx;
1616
	struct io_kiocb *link = io_prep_linked_timeout(req);
1617

1618 1619 1620
	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
					&req->work, req->flags);
	io_wq_enqueue(ctx->io_wq, &req->work);
1621
	return link;
1622 1623
}

1624 1625
static void io_queue_async_work(struct io_kiocb *req)
{
1626 1627
	struct io_kiocb *link;

1628 1629
	/* init ->work of the whole link before punting */
	io_prep_async_link(req);
1630 1631 1632 1633
	link = __io_queue_async_work(req);

	if (link)
		io_queue_linked_timeout(link);
1634 1635
}

J
Jens Axboe 已提交
1636 1637
static void io_kill_timeout(struct io_kiocb *req)
{
1638
	struct io_timeout_data *io = req->async_data;
J
Jens Axboe 已提交
1639 1640
	int ret;

1641
	ret = hrtimer_try_to_cancel(&io->timer);
J
Jens Axboe 已提交
1642
	if (ret != -1) {
1643 1644
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
P
Pavel Begunkov 已提交
1645
		list_del_init(&req->timeout.list);
1646
		io_cqring_fill_event(req, 0);
1647
		io_put_req_deferred(req, 1);
J
Jens Axboe 已提交
1648 1649 1650
	}
}

1651 1652 1653
/*
 * Returns true if we found and killed one or more timeouts
 */
1654 1655
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     struct files_struct *files)
J
Jens Axboe 已提交
1656 1657
{
	struct io_kiocb *req, *tmp;
1658
	int canceled = 0;
J
Jens Axboe 已提交
1659 1660

	spin_lock_irq(&ctx->completion_lock);
1661
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
1662
		if (io_match_task(req, tsk, files)) {
1663
			io_kill_timeout(req);
1664 1665
			canceled++;
		}
1666
	}
J
Jens Axboe 已提交
1667
	spin_unlock_irq(&ctx->completion_lock);
1668
	return canceled != 0;
J
Jens Axboe 已提交
1669 1670
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
	do {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	} while (!list_empty(&ctx->defer_list));
}

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;

	if (list_empty(&ctx->timeout_list))
		return;

	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	do {
		u32 events_needed, events_got;
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req);
	} while (!list_empty(&ctx->timeout_list));

	ctx->cq_last_tm_flush = seq;
J
Jens Axboe 已提交
1720

1721 1722 1723
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	io_flush_timeouts(ctx);
1724 1725 1726

	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
1727

1728 1729
	if (unlikely(!list_empty(&ctx->defer_list)))
		__io_queue_deferred(ctx);
1730 1731
}

1732 1733 1734 1735 1736 1737 1738
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
}

1739 1740 1741 1742 1743
static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

J
Jens Axboe 已提交
1744 1745
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
1746
	struct io_rings *rings = ctx->rings;
J
Jens Axboe 已提交
1747 1748
	unsigned tail;

1749 1750 1751 1752 1753
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
1754
	if (__io_cqring_events(ctx) == rings->cq_ring_entries)
J
Jens Axboe 已提交
1755 1756
		return NULL;

1757
	tail = ctx->cached_cq_tail++;
1758
	return &rings->cqes[tail & ctx->cq_mask];
J
Jens Axboe 已提交
1759 1760
}
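
/*
 * For reference, a hypothetical sketch of the application-side counterpart
 * to the comment above: the CQE field loads must be ordered after the tail
 * load, and the head store after the CQE loads (the pointer names below are
 * assumptions, derived from io_cqring_offsets):
 *
 *	unsigned head = *cq_head;			// application owns the CQ head
 *	while (head != smp_load_acquire(cq_tail)) {	// pairs with the kernel's tail store
 *		process(&cqes[head & *cq_ring_mask]);	// entry load(s)
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);		// publish consumed entries
 */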

1761 1762
static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
{
1763 1764
	if (!ctx->cq_ev_fd)
		return false;
1765 1766
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		return false;
1767 1768
	if (!ctx->eventfd_async)
		return true;
1769
	return io_wq_current_is_worker();
1770 1771
}

1772
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1773
{
1774 1775 1776
	/* see waitqueue_active() comment */
	smp_mb();

1777 1778
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
1779 1780
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
1781
	if (io_should_trigger_evfd(ctx))
1782
		eventfd_signal(ctx->cq_ev_fd, 1);
1783
	if (waitqueue_active(&ctx->cq_wait)) {
1784 1785 1786
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
1787 1788
}

1789 1790
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
1791 1792 1793
	/* see waitqueue_active() comment */
	smp_mb();

1794 1795 1796 1797 1798 1799
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->wait))
			wake_up(&ctx->wait);
	}
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
1800
	if (waitqueue_active(&ctx->cq_wait)) {
1801 1802 1803
		wake_up_interruptible(&ctx->cq_wait);
		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
	}
1804 1805
}

/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				       struct task_struct *tsk,
				       struct files_struct *files)
{
	struct io_rings *rings = ctx->rings;
	struct io_kiocb *req, *tmp;
	struct io_uring_cqe *cqe;
	unsigned long flags;
	bool all_flushed, posted;
	LIST_HEAD(list);

	if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
		return false;

	posted = false;
	spin_lock_irqsave(&ctx->completion_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
		if (!io_match_task(req, tsk, files))
			continue;

		cqe = io_get_cqring(ctx);
		if (!cqe && !force)
			break;

		list_move(&req->compl.list, &list);
		if (cqe) {
			WRITE_ONCE(cqe->user_data, req->user_data);
			WRITE_ONCE(cqe->res, req->result);
			WRITE_ONCE(cqe->flags, req->compl.cflags);
		} else {
			ctx->cached_cq_overflow++;
			WRITE_ONCE(ctx->rings->cq_overflow,
				   ctx->cached_cq_overflow);
		}
		posted = true;
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);
	if (all_flushed) {
		clear_bit(0, &ctx->sq_check_overflow);
		clear_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
	}

	if (posted)
		io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	if (posted)
		io_cqring_ev_posted(ctx);

	while (!list_empty(&list)) {
		req = list_first_entry(&list, struct io_kiocb, compl.list);
		list_del(&req->compl.list);
		io_put_req(req);
	}

	return all_flushed;
}

static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
				     struct task_struct *tsk,
				     struct files_struct *files)
{
	if (test_bit(0, &ctx->cq_check_overflow)) {
		/* iopoll syncs against uring_lock, not completion_lock */
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&ctx->uring_lock);
		__io_cqring_overflow_flush(ctx, force, tsk, files);
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&ctx->uring_lock);
	}
}

static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

	trace_io_uring_complete(ctx, req->user_data, res);

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
		WRITE_ONCE(cqe->user_data, req->user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
	} else if (ctx->cq_overflow_flushed ||
		   atomic_read(&req->task->io_uring->in_idle)) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * then we cannot store the request for later flushing, we need
		 * to drop it on the floor.
		 */
		ctx->cached_cq_overflow++;
		WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
	} else {
		if (list_empty(&ctx->cq_overflow_list)) {
			set_bit(0, &ctx->sq_check_overflow);
			set_bit(0, &ctx->cq_check_overflow);
			ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
		}
		io_clean_op(req);
		req->result = res;
		req->compl.cflags = cflags;
		refcount_inc(&req->refs);
		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
	}
}

static void io_cqring_fill_event(struct io_kiocb *req, long res)
{
	__io_cqring_fill_event(req, res, 0);
}

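/*
 * Post a completion for @req under ->completion_lock. If this drops the
 * last reference, the request is parked on the locked free list so it
 * can be recycled by the submission path.
 */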
static inline void io_req_complete_post(struct io_kiocb *req, long res,
					unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
	 */
	if (refcount_dec_and_test(&req->refs)) {
		struct io_comp_state *cs = &ctx->submit_state.comp;

		io_dismantle_req(req);
		io_put_task(req->task, 1);
		list_add(&req->compl.list, &cs->locked_free_list);
		cs->locked_free_nr++;
	} else
		req = NULL;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	if (req) {
		io_queue_next(req);
		percpu_ref_put(&ctx->refs);
	}
}

static void io_req_complete_state(struct io_kiocb *req, long res,
				  unsigned int cflags)
{
	io_clean_op(req);
	req->result = res;
	req->compl.cflags = cflags;
	req->flags |= REQ_F_COMPLETE_INLINE;
}

static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags,
				     long res, unsigned cflags)
{
	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
		io_req_complete_state(req, res, cflags);
	else
		io_req_complete_post(req, res, cflags);
}

static inline void io_req_complete(struct io_kiocb *req, long res)
{
	__io_req_complete(req, 0, res, 0);
}

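/*
 * Refill the submission-side request cache from the free lists; returns
 * true if at least one cached request became available.
 */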
static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_comp_state *cs = &state->comp;
	struct io_kiocb *req = NULL;

	/*
	 * If we have more than a batch's worth of requests in our IRQ side
	 * locked cache, grab the lock and move them over to our submission
	 * side cache.
	 */
	if (READ_ONCE(cs->locked_free_nr) > IO_COMPL_BATCH) {
		spin_lock_irq(&ctx->completion_lock);
		list_splice_init(&cs->locked_free_list, &cs->free_list);
		cs->locked_free_nr = 0;
		spin_unlock_irq(&ctx->completion_lock);
	}

	while (!list_empty(&cs->free_list)) {
		req = list_first_entry(&cs->free_list, struct io_kiocb,
					compl.list);
		list_del(&req->compl.list);
		state->reqs[state->free_reqs++] = req;
		if (state->free_reqs == ARRAY_SIZE(state->reqs))
			break;
	}

	return req != NULL;
}

static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));

	if (!state->free_reqs) {
		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
		int ret;

		if (io_flush_cached_reqs(ctx))
			goto got_req;

		ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
					    state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				return NULL;
			ret = 1;
		}
		state->free_reqs = ret;
	}
got_req:
	state->free_reqs--;
	return state->reqs[state->free_reqs];
}

static inline void io_put_file(struct io_kiocb *req, struct file *file,
			  bool fixed)
{
	if (!fixed)
		fput(file);
}

static void io_dismantle_req(struct io_kiocb *req)
{
	io_clean_op(req);

	if (req->async_data)
		kfree(req->async_data);
	if (req->file)
		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
	if (req->fixed_rsrc_refs)
		percpu_ref_put(req->fixed_rsrc_refs);
	io_req_clean_work(req);
}

static inline void io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_idle)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}

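/* Final teardown of a request once all references have been dropped. */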
static void __io_free_req(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_dismantle_req(req);
	io_put_task(req->task, 1);

	kmem_cache_free(req_cachep, req);
	percpu_ref_put(&ctx->refs);
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

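/*
 * Cancel a linked timeout hanging off @req; if the timer is still
 * pending, complete it with -ECANCELED.
 */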
static void io_kill_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *link;
	bool cancelled = false;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	link = req->link;

	/*
	 * Can happen if a linked timeout fired and link had been like
	 * req -> link t-out -> link t-out [-> ...]
	 */
	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
		struct io_timeout_data *io = link->async_data;
		int ret;

		io_remove_next_linked(req);
		link->timeout.head = NULL;
		ret = hrtimer_try_to_cancel(&io->timer);
		if (ret != -1) {
			io_cqring_fill_event(link, -ECANCELED);
			io_commit_cqring(ctx);
			cancelled = true;
		}
	}
	req->flags &= ~REQ_F_LINK_TIMEOUT;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (cancelled) {
		io_cqring_ev_posted(ctx);
		io_put_req(link);
	}
}

static void io_fail_links(struct io_kiocb *req)
{
	struct io_kiocb *link, *nxt;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	link = req->link;
	req->link = NULL;

	while (link) {
		nxt = link->link;
		link->link = NULL;

		trace_io_uring_fail_link(req, link);
		io_cqring_fill_event(link, -ECANCELED);

		/*
		 * It's ok to free under spinlock as they're not linked anymore,
		 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
		 * work.fs->lock.
		 */
		if (link->flags & REQ_F_WORK_INITIALIZED)
			io_put_req_deferred(link, 2);
		else
			io_double_put_req(link);
		link = nxt;
	}
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
{
	if (req->flags & REQ_F_LINK_TIMEOUT)
		io_kill_linked_timeout(req);

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (likely(!(req->flags & REQ_F_FAIL_LINK))) {
		struct io_kiocb *nxt = req->link;

		req->link = NULL;
		return nxt;
	}
	io_fail_links(req);
	return NULL;
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK))))
		return NULL;
	return __io_req_find_next(req);
}

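/*
 * Run one batch of task_work queued on this io_uring task context,
 * flushing deferred completions whenever the ring context changes.
 */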
static bool __tctx_task_work(struct io_uring_task *tctx)
{
	struct io_ring_ctx *ctx = NULL;
	struct io_wq_work_list list;
	struct io_wq_work_node *node;

	if (wq_list_empty(&tctx->task_list))
		return false;

	spin_lock_irq(&tctx->task_lock);
	list = tctx->task_list;
	INIT_WQ_LIST(&tctx->task_list);
	spin_unlock_irq(&tctx->task_lock);

	node = list.first;
	while (node) {
		struct io_wq_work_node *next = node->next;
		struct io_ring_ctx *this_ctx;
		struct io_kiocb *req;

		req = container_of(node, struct io_kiocb, io_task_work.node);
		this_ctx = req->ctx;
		req->task_work.func(&req->task_work);
		node = next;

		if (!ctx) {
			ctx = this_ctx;
		} else if (ctx != this_ctx) {
			mutex_lock(&ctx->uring_lock);
			io_submit_flush_completions(&ctx->submit_state.comp, ctx);
			mutex_unlock(&ctx->uring_lock);
			ctx = this_ctx;
		}
	}

	if (ctx && ctx->submit_state.comp.nr) {
		mutex_lock(&ctx->uring_lock);
		io_submit_flush_completions(&ctx->submit_state.comp, ctx);
		mutex_unlock(&ctx->uring_lock);
	}

	return list.first != NULL;
}

static void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx = container_of(cb, struct io_uring_task, task_work);

	while (__tctx_task_work(tctx))
		cond_resched();

	clear_bit(0, &tctx->task_state);
}

static int io_task_work_add(struct task_struct *tsk, struct io_kiocb *req,
			    enum task_work_notify_mode notify)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_wq_work_node *node, *prev;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(!tctx);

	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
	spin_unlock_irqrestore(&tctx->task_lock, flags);

	/* task_work already pending, we're done */
	if (test_bit(0, &tctx->task_state) ||
	    test_and_set_bit(0, &tctx->task_state))
		return 0;

	if (!task_work_add(tsk, &tctx->task_work, notify))
		return 0;

	/*
	 * Slow path - we failed, find and delete work. if the work is not
	 * in the list, it got run and we're fine.
	 */
	ret = 0;
	spin_lock_irqsave(&tctx->task_lock, flags);
	wq_list_for_each(node, prev, &tctx->task_list) {
		if (&req->io_task_work.node == node) {
			wq_list_del(&tctx->task_list, node, prev);
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&tctx->task_lock, flags);
	clear_bit(0, &tctx->task_state);
	return ret;
}

static int io_req_task_work_add(struct io_kiocb *req)
{
	struct task_struct *tsk = req->task;
	struct io_ring_ctx *ctx = req->ctx;
	enum task_work_notify_mode notify;
	int ret;

	if (tsk->flags & PF_EXITING)
		return -ESRCH;

	/*
	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
	 * processing task_work. There's no reliable way to tell if TWA_RESUME
	 * will do the job.
	 */
	notify = TWA_NONE;
	if (!(ctx->flags & IORING_SETUP_SQPOLL))
		notify = TWA_SIGNAL;

	ret = io_task_work_add(tsk, req, notify);
	if (!ret)
		wake_up_process(tsk);

	return ret;
}

static void io_req_task_work_add_fallback(struct io_kiocb *req,
					  task_work_func_t cb)
{
	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);

	init_task_work(&req->task_work, cb);
	task_work_add(tsk, &req->task_work, TWA_NONE);
	wake_up_process(tsk);
}

static void __io_req_task_cancel(struct io_kiocb *req, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	io_cqring_fill_event(req, error);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_double_put_req(req);
}

static void io_req_task_cancel(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;

	__io_req_task_cancel(req, -ECANCELED);
	percpu_ref_put(&ctx->refs);
}

static void __io_req_task_submit(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/* ctx stays valid until unlock, even if we drop all our ctx->refs */
	mutex_lock(&ctx->uring_lock);
	if (!ctx->sqo_dead && !(current->flags & PF_EXITING) &&
	    !io_sq_thread_acquire_mm_files(ctx, req))
		__io_queue_sqe(req);
	else
		__io_req_task_cancel(req, -EFAULT);
	mutex_unlock(&ctx->uring_lock);
}

static void io_req_task_submit(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	__io_req_task_submit(req);
}

static void io_req_task_queue(struct io_kiocb *req)
{
	int ret;

	req->task_work.func = io_req_task_submit;
	ret = io_req_task_work_add(req);
	if (unlikely(ret)) {
		percpu_ref_get(&req->ctx->refs);
		io_req_task_work_add_fallback(req, io_req_task_cancel);
	}
}

static inline void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_req(struct io_kiocb *req)
{
	io_queue_next(req);
	__io_free_req(req);
}

struct req_batch {
	struct task_struct	*task;
	int			task_refs;
	int			ctx_refs;
};

static inline void io_init_req_batch(struct req_batch *rb)
{
	rb->task_refs = 0;
	rb->ctx_refs = 0;
	rb->task = NULL;
}

static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
				     struct req_batch *rb)
{
	if (rb->task)
		io_put_task(rb->task, rb->task_refs);
	if (rb->ctx_refs)
		percpu_ref_put_many(&ctx->refs, rb->ctx_refs);
}

static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
			      struct io_submit_state *state)
{
	io_queue_next(req);

	if (req->task != rb->task) {
		if (rb->task)
			io_put_task(rb->task, rb->task_refs);
		rb->task = req->task;
		rb->task_refs = 0;
	}
	rb->task_refs++;
	rb->ctx_refs++;

	io_dismantle_req(req);
	if (state->free_reqs != ARRAY_SIZE(state->reqs))
		state->reqs[state->free_reqs++] = req;
	else
		list_add(&req->compl.list, &state->comp.free_list);
}

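/*
 * Flush a batch of inline completions: post the CQEs under the
 * completion lock, then drop the submit/complete references outside it.
 */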
static void io_submit_flush_completions(struct io_comp_state *cs,
					struct io_ring_ctx *ctx)
{
	int i, nr = cs->nr;
	struct io_kiocb *req;
	struct req_batch rb;

	io_init_req_batch(&rb);
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];
		__io_cqring_fill_event(req, req->result, req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];

		/* submission and completion refs */
		if (refcount_sub_and_test(2, &req->refs))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_req_free_batch_finish(ctx, &rb);
	cs->nr = 0;
}

/*
 * Drop reference to request, return next in chain (if there is one) if this
 * was the last reference to this request.
 */
static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = NULL;

	if (refcount_dec_and_test(&req->refs)) {
		nxt = io_req_find_next(req);
		__io_free_req(req);
	}
	return nxt;
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static void io_put_req_deferred_cb(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);

	io_free_req(req);
}

static void io_free_req_deferred(struct io_kiocb *req)
{
	int ret;

	req->task_work.func = io_put_req_deferred_cb;
	ret = io_req_task_work_add(req);
	if (unlikely(ret))
		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
}

static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
{
	if (refcount_sub_and_test(refs, &req->refs))
		io_free_req_deferred(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
{
	unsigned int cflags;

	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
	cflags |= IORING_CQE_F_BUFFER;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(kbuf);
	return cflags;
}

static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
{
	struct io_buffer *kbuf;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	return io_put_kbuf(req, kbuf);
}

static inline bool io_run_task_work(void)
{
	/*
	 * Not safe to run on exiting task, and the task_work handling will
	 * not add work to such a task.
	 */
	if (unlikely(current->flags & PF_EXITING))
		return false;
	if (current->task_works) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return true;
	}

	return false;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;

	/* order with ->result store in io_complete_rw_iopoll() */
	smp_rmb();

	io_init_req_batch(&rb);
	while (!list_empty(done)) {
		int cflags = 0;

		req = list_first_entry(done, struct io_kiocb, inflight_entry);
		list_del(&req->inflight_entry);

		if (READ_ONCE(req->result) == -EAGAIN) {
			req->iopoll_completed = 0;
			if (io_rw_reissue(req))
				continue;
		}

		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_rw_kbuf(req);

		__io_cqring_fill_event(req, req->result, cflags);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs))
			io_req_free_batch(&rb, req, &ctx->submit_state);
	}

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	io_req_free_batch_finish(ctx, &rb);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
		struct kiocb *kiocb = &req->rw.kiocb;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed)) {
			list_move_tail(&req->inflight_entry, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		/* iopoll may have completed current req */
		if (READ_ONCE(req->iopoll_completed))
			list_move_tail(&req->inflight_entry, &done);

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
				long min)
{
	while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (*nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->iopoll_list)) {
		unsigned int nr_events = 0;

		io_do_iopoll(ctx, &nr_events, 0);

		/* let it sleep and repeat later if can't complete a request */
		if (nr_events == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. to progress by releasing the mutex
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (test_bit(0, &ctx->cq_check_overflow))
			__io_cqring_overflow_flush(ctx, false, NULL, NULL);
		if (io_cqring_events(ctx))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			io_run_task_work();
			mutex_lock(&ctx->uring_lock);
		}

		ret = io_iopoll_getevents(ctx, &nr_events, min);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	int rw, ret;
	struct iov_iter iter;

	/* already prepared */
	if (req->async_data)
		return true;

	switch (req->opcode) {
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		rw = READ;
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		rw = WRITE;
		break;
	default:
		printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
				req->opcode);
		return false;
	}

	ret = io_import_iovec(rw, req, &iovec, &iter, false);
	if (ret < 0)
		return false;
	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
}
#endif

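/*
 * Try to re-issue a read/write that failed with -EAGAIN on a block or
 * regular file, instead of failing the request outright.
 */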
static bool io_rw_reissue(struct io_kiocb *req)
{
#ifdef CONFIG_BLOCK
	umode_t mode = file_inode(req->file)->i_mode;
	int ret;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
		return false;

	lockdep_assert_held(&req->ctx->uring_lock);

	ret = io_sq_thread_acquire_mm_files(req->ctx, req);

	if (!ret && io_resubmit_prep(req)) {
		refcount_inc(&req->refs);
		io_queue_async_work(req);
		return true;
	}
	req_set_fail_links(req);
#endif
	return false;
}

static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
			     unsigned int issue_flags)
{
	int cflags = 0;

	if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
		return;
	if (res != req->result)
		req_set_fail_links(req);

	if (req->rw.kiocb.ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_rw_kbuf(req);
	__io_req_complete(req, issue_flags, res, cflags);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	__io_complete_rw(req, res, res2, 0);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if (res != -EAGAIN && res != req->result)
		req_set_fail_links(req);

	WRITE_ONCE(req->result, res);
	/* order with io_poll_complete() checking ->result */
	smp_wmb();
	WRITE_ONCE(req->iopoll_completed, 1);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from a io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
						inflight_entry);
		if (list_req->file != req->file)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		list_add(&req->inflight_entry, &ctx->iopoll_list);
	else
		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

	/*
	 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
	 * task context or in io worker task context. If current task context is
	 * sq thread, we don't need to check whether we should wake up sq thread.
	 */
	if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
	    wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
}

static inline void io_state_file_put(struct io_submit_state *state)
{
	if (state->file_refs) {
		fput_many(state->file, state->file_refs);
		state->file_refs = 0;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *__io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file_refs) {
		if (state->fd == fd) {
			state->file_refs--;
			return state->file;
		}
		io_state_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (unlikely(!state->file))
		return NULL;

	state->fd = fd;
	state->file_refs = state->ios_left - 1;
	return state->file;
}

static bool io_bdev_nowait(struct block_device *bdev)
{
	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file, int rw)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
			return true;
		return false;
	}
	if (S_ISCHR(mode) || S_ISSOCK(mode))
		return true;
	if (S_ISREG(mode)) {
		if (IS_ENABLED(CONFIG_BLOCK) &&
		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
		    file->f_op != &io_uring_fops)
			return true;
		return false;
	}

	/* any ->read/write should understand O_NONBLOCK */
	if (file->f_flags & O_NONBLOCK)
		return true;

	if (!(file->f_mode & FMODE_NOWAIT))
		return false;

	if (rw == READ)
		return file->f_op->read_iter != NULL;

	return file->f_op->write_iter != NULL;
}

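/*
 * Common read/write preparation: set up the embedded kiocb (position,
 * flags, ioprio) from the SQE and pick the IOPOLL or regular completion
 * handler.
 */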
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	unsigned ioprio;
	int ret;

	if (S_ISREG(file_inode(file)->i_mode))
		req->flags |= REQ_F_ISREG;

	kiocb->ki_pos = READ_ONCE(sqe->off);
	if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		kiocb->ki_pos = file->f_pos;
	}
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	req->rw.addr = READ_ONCE(sqe->addr);
	req->rw.len = READ_ONCE(sqe->len);
	req->buf_index = READ_ONCE(sqe->buf_index);
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (io && io->bytes_done > 0) {
		if (ret < 0)
			ret = io->bytes_done;
		else
			ret += io->bytes_done;
	}

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = kiocb->ki_pos;
	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
		__io_complete_rw(req, ret, 0, issue_flags);
	else
		io_rw_done(kiocb, ret);
}

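/*
 * Set up @iter over a registered (fixed) buffer, validating that the
 * requested range lies within the registered region.
 */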
static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
{
	struct io_ring_ctx *ctx = req->ctx;
	size_t len = req->rw.len;
	struct io_mapped_ubuf *imu;
	u16 index, buf_index = req->buf_index;
	size_t offset;
	u64 buf_addr;

	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;
	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = req->rw.addr;

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
3149
			iter->count -= bvec->bv_len + offset;
3150 3151 3152 3153
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

3154
	return 0;
3155 3156
}

static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
{
	if (needs_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (needs_lock)
		mutex_lock(&ctx->uring_lock);
}

static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
					  int bgid, struct io_buffer *kbuf,
					  bool needs_lock)
{
	struct io_buffer *head;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		return kbuf;

	io_ring_submit_lock(req->ctx, needs_lock);

	lockdep_assert_held(&req->ctx->uring_lock);

	head = idr_find(&req->ctx->io_buffer_idr, bgid);
	if (head) {
		if (!list_empty(&head->list)) {
			kbuf = list_last_entry(&head->list, struct io_buffer,
							list);
			list_del(&kbuf->list);
		} else {
			kbuf = head;
			idr_remove(&req->ctx->io_buffer_idr, bgid);
		}
		if (*len > kbuf->len)
			*len = kbuf->len;
	} else {
		kbuf = ERR_PTR(-ENOBUFS);
	}

	io_ring_submit_unlock(req->ctx, needs_lock);

	return kbuf;
}

static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
					bool needs_lock)
{
	struct io_buffer *kbuf;
	u16 bgid;

	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
	bgid = req->buf_index;
	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;
	req->rw.addr = (u64) (unsigned long) kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return u64_to_user_ptr(kbuf->addr);
}

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				bool needs_lock)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	ssize_t len;

	uiov = u64_to_user_ptr(req->rw.addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      bool needs_lock)
{
	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_rw_buffer_select(req, &len, needs_lock);
	if (IS_ERR(buf))
		return PTR_ERR(buf);
	iov[0].iov_base = buf;
	iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    bool needs_lock)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		struct io_buffer *kbuf;

		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		iov[0].iov_len = kbuf->len;
		return 0;
	}
	if (req->rw.len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, needs_lock);
#endif

	return __io_iov_buffer_select(req, iov, needs_lock);
}

static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec,
			   struct iov_iter *iter, bool needs_lock)
{
	void __user *buf = u64_to_user_ptr(req->rw.addr);
	size_t sqe_len = req->rw.len;
	u8 opcode = req->opcode;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		*iovec = NULL;
		return io_import_fixed(req, rw, iter);
	}

	/* buffer index only valid with fixed read/write, or buffer select */
	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (req->flags & REQ_F_BUFFER_SELECT) {
			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
			if (IS_ERR(buf))
				return PTR_ERR(buf);
			req->rw.len = sqe_len;
		}

		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
		*iovec = NULL;
		return ret;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, *iovec, needs_lock);
		if (!ret)
			iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len);
		*iovec = NULL;
		return ret;
	}

	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
			      req->ctx->compat);
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
{
	struct kiocb *kiocb = &req->rw.kiocb;
	struct file *file = req->file;
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
			iovec.iov_len = req->rw.len;
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, io_kiocb_ppos(kiocb));
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, io_kiocb_ppos(kiocb));
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		req->rw.len -= nr;
		req->rw.addr += nr;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *rw = req->async_data;

	memcpy(&rw->iter, iter, sizeof(*iter));
	rw->free_iovec = iovec;
	rw->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		rw->iter.iov = rw->fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			rw->iter.iov += iov_off;
		}
		if (rw->fast_iov != fast_iov)
			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static inline int __io_alloc_async_data(struct io_kiocb *req)
{
	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
	return req->async_data == NULL;
}

static int io_alloc_async_data(struct io_kiocb *req)
{
	if (!io_op_defs[req->opcode].needs_async_data)
		return 0;

	return __io_alloc_async_data(req);
}

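/*
 * Ensure the request has async context holding a stable copy of the
 * iovec and iterator, so the I/O can be retried from another context.
 */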
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     const struct iovec *fast_iov,
			     struct iov_iter *iter, bool force)
{
	if (!force && !io_op_defs[req->opcode].needs_async_data)
		return 0;
	if (!req->async_data) {
		if (__io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, fast_iov, iter);
	}
	return 0;
}

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov = iorw->fast_iov;
	int ret;

	ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	ssize_t ret;

	ret = io_prep_rw(req, sqe);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_READ)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->async_data)
		return 0;
	return io_rw_prep_async(req, READ);
}

/*
 * This is our waitqueue callback handler, registered through lock_page_async()
 * when we initially tried to do the IO with the iocb armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);

	/* submit ref gets dropped, acquire a new one */
	refcount_inc(&req->refs);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *rw = req->async_data;
	struct wait_page_queue *wait = &rw->wpq;
	struct kiocb *kiocb = &req->rw.kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{
	if (req->file->f_op->read_iter)
		return call_read_iter(req->file, &req->rw.kiocb, iter);
	else if (req->file->f_op->read)
		return loop_rw_iter(READ, req, iter);
	else
		return -EINVAL;
}

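/*
 * Issue a read. Nonblocking attempts that can't complete are set up for
 * async retry; buffered reads may arm a page-unlock callback and retry
 * in place via io_rw_should_retry().
 */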
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t io_size, ret, ret2;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, READ)) {
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
		return ret ?: -EAGAIN;
	}

	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(req, iter);

	if (ret == -EIOCBQUEUED) {
		goto out_free;
	} else if (ret == -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = 0;
	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	rw = req->async_data;
	/* now use our persistent iterator, if we aren't already */
	iter = &rw->iter;

	do {
		io_size -= ret;
		rw->bytes_done += ret;
		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(req, iter);
		if (ret == -EIOCBQUEUED)
			return 0;
		/* we got some bytes, but not all. retry. */
	} while (ret > 0 && ret < io_size);
3661
done:
3662
	kiocb_done(kiocb, ret, issue_flags);
P
Pavel Begunkov 已提交
3663 3664 3665 3666
out_free:
	/* it's faster to check here then delegate to kfree */
	if (iovec)
		kfree(iovec);
3667
	return 0;
J
Jens Axboe 已提交
3668 3669
}

static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	ssize_t ret;

	ret = io_prep_rw(req, sqe);
	if (ret)
		return ret;

	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
		return -EBADF;

	/* either don't need iovec imported or already have it */
	if (!req->async_data)
		return 0;
	return io_rw_prep_async(req, WRITE);
}

static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw.kiocb;
	struct iov_iter __iter, *iter = &__iter;
	struct io_async_rw *rw = req->async_data;
	ssize_t ret, ret2, io_size;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (rw) {
		iter = &rw->iter;
		iovec = NULL;
	} else {
		ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
		if (ret < 0)
			return ret;
	}
	io_size = iov_iter_count(iter);
	req->result = io_size;

	/* Ensure we clear previously set non-block flag */
	if (!force_nonblock)
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	else
		kiocb->ki_flags |= IOCB_NOWAIT;

	/* If the file doesn't support async, just async punt */
	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
		goto copy_iov;

	/* file path doesn't support NOWAIT for non-direct_IO */
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
	    (req->flags & REQ_F_ISREG))
		goto copy_iov;

	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw().  Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (req->file->f_op->write_iter)
		ret2 = call_write_iter(req->file, kiocb, iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, req, iter);
	else
		ret2 = -EINVAL;

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
			goto copy_iov;
done:
		kiocb_done(kiocb, ret2, issue_flags);
	} else {
copy_iov:
		/* some cases will consume bytes even on error returns */
		iov_iter_revert(iter, io_size - iov_iter_count(iter));
		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
		return ret ?: -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

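/*
 * Prepare IORING_OP_RENAMEAT: sqe->fd/len carry the old/new directory fds,
 * sqe->addr/addr2 the old/new path pointers, and sqe->rename_flags the
 * renameat2() flags. Both paths are copied in here so the op can run async.
 */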
static int io_renameat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_rename *ren = &req->rename;
	const char __user *oldf, *newf;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	ren->old_dfd = READ_ONCE(sqe->fd);
	oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	ren->new_dfd = READ_ONCE(sqe->len);
	ren->flags = READ_ONCE(sqe->rename_flags);

	ren->oldpath = getname(oldf);
	if (IS_ERR(ren->oldpath))
		return PTR_ERR(ren->oldpath);

	ren->newpath = getname(newf);
	if (IS_ERR(ren->newpath)) {
		putname(ren->oldpath);
		return PTR_ERR(ren->newpath);
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rename *ren = &req->rename;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
				ren->newpath, ren->flags);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_unlinkat_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_unlink *un = &req->unlink;
	const char __user *fname;

	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	un->dfd = READ_ONCE(sqe->fd);

	un->flags = READ_ONCE(sqe->unlink_flags);
	if (un->flags & ~AT_REMOVEDIR)
		return -EINVAL;

	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	un->filename = getname(fname);
	if (IS_ERR(un->filename))
		return PTR_ERR(un->filename);

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_unlink *un = &req->unlink;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	if (un->flags & AT_REMOVEDIR)
		ret = do_rmdir(un->dfd, un->filename);
	else
		ret = do_unlinkat(un->dfd, un->filename);

	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

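/* IORING_OP_SHUTDOWN is only available when the kernel is built with CONFIG_NET */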
static int io_shutdown_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
	    sqe->buf_index)
		return -EINVAL;

	req->shutdown.how = READ_ONCE(sqe->len);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_NET)
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, req->shutdown.how);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

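/*
 * Common prep for the splice and tee opcodes: validate flags, resolve the
 * input file (possibly a fixed file), and mark the request for cleanup.
 */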
static int __io_splice_prep(struct io_kiocb *req,
			    const struct io_uring_sqe *sqe)
{
	struct io_splice *sp = &req->splice;
	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sp->file_in = NULL;
	sp->len = READ_ONCE(sqe->len);
	sp->flags = READ_ONCE(sqe->splice_flags);

	if (unlikely(sp->flags & ~valid_flags))
		return -EINVAL;

	sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
				  (sp->flags & SPLICE_F_FD_IN_FIXED));
	if (!sp->file_in)
		return -EBADF;
	req->flags |= REQ_F_NEED_CLEANUP;

	if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
		/*
		 * The splice operation will be punted async, and we need to
		 * modify io_wq_work.flags for that, so initialize io_wq_work
		 * first.
		 */
		io_req_init_async(req);
		req->work.flags |= IO_WQ_WORK_UNBOUND;
	}

	return 0;
}

static int io_tee_prep(struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
		return -EINVAL;
	return __io_splice_prep(req, sqe);
}

static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	if (sp->len)
		ret = do_tee(in, out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_splice* sp = &req->splice;

	sp->off_in = READ_ONCE(sqe->splice_off_in);
	sp->off_out = READ_ONCE(sqe->off);
	return __io_splice_prep(req, sqe);
}

static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_splice *sp = &req->splice;
	struct file *in = sp->file_in;
	struct file *out = sp->file_out;
	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
	loff_t *poff_in, *poff_out;
	long ret = 0;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;

	if (sp->len)
		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);

	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (ret != sp->len)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	__io_req_complete(req, issue_flags, 0, 0);
	return 0;
}

static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	return 0;
}

static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	loff_t end = req->sync.off + req->sync.len;
	int ret;

	/* fsync always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = vfs_fsync_range(req->file, req->sync.off,
				end > 0 ? end : LLONG_MAX,
				req->sync.flags & IORING_FSYNC_DATASYNC);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	return 0;
}

static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* fallocate always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

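/*
 * Common prep for openat/openat2: copy the filename from userspace and
 * stash the dirfd, RLIMIT_NOFILE and open_how for the actual open.
 */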
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (unlikely(sqe->ioprio || sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}
	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	u64 flags, mode;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	mode = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->open_flags);
	req->open.how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	size_t len;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

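/*
 * For a nonblocking issue, attempt a cached (LOOKUP_CACHED, O_NONBLOCK) open
 * first; if the lookup would block, return -EAGAIN so the open is retried
 * from a blocking context.
 */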
static int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct open_flags op;
	struct file *file;
	bool nonblock_set;
	bool resolve_nonblock;
	int ret;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always return -EAGAIN
		 */
		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	/* only retry if RESOLVE_CACHED wasn't already set by application */
	if ((!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)) &&
	    file == ERR_PTR(-EAGAIN)) {
		/*
		 * We could hang on to this 'fd', but seems like marginal
		 * gain for something that is now known to be a slower path.
		 * So just put it, and we'll get a new one when we retry.
		 */
		put_unused_fd(ret);
		return -EAGAIN;
	}

	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
			file->f_flags &= ~O_NONBLOCK;
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags & IO_URING_F_NONBLOCK);
}

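/*
 * Provided buffers: userspace registers groups of buffers keyed by bgid in
 * ctx->io_buffer_idr, and requests marked REQ_F_BUFFER_SELECT pick one of
 * them at execution time. The two ops below remove and add buffers.
 */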
static int io_remove_buffers_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
			       int bgid, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	/* the head kbuf is the list itself */
	while (!list_empty(&buf->list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&buf->list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);
		if (++i == nbufs)
			return i;
	}
	i++;
	kfree(buf);
	idr_remove(&ctx->io_buffer_idr, bgid);

	return i;
}

static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	ret = -ENOENT;
	head = idr_find(&ctx->io_buffer_idr, p->bgid);
	if (head)
		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
	if (ret < 0)
		req_set_fail_links(req);

	/* need to hold the lock to complete IOPOLL requests */
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		__io_req_complete(req, issue_flags, ret, 0);
		io_ring_submit_unlock(ctx, !force_nonblock);
	} else {
		io_ring_submit_unlock(ctx, !force_nonblock);
		__io_req_complete(req, issue_flags, ret, 0);
	}
	return 0;
}

static int io_provide_buffers_prep(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = &req->pbuf;
	u64 tmp;

	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > USHRT_MAX)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	p->bid = tmp;
	return 0;
}

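/*
 * Allocate pbuf->nbufs io_buffer entries describing consecutive chunks of
 * pbuf->len bytes starting at pbuf->addr, with buffer ids starting at
 * pbuf->bid, and link them onto *head.
 */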
static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			break;

		buf->addr = addr;
		buf->len = pbuf->len;
		buf->bid = bid;
		addr += pbuf->len;
		bid++;
		if (!*head) {
			INIT_LIST_HEAD(&buf->list);
			*head = buf;
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
	}

	return i ? i : -ENOMEM;
}

static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = &req->pbuf;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer *head, *list;
	int ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	io_ring_submit_lock(ctx, !force_nonblock);

	lockdep_assert_held(&ctx->uring_lock);

	list = head = idr_find(&ctx->io_buffer_idr, p->bgid);

	ret = io_add_buffers(p, &head);
	if (ret < 0)
		goto out;

	if (!list) {
		ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
					GFP_KERNEL);
		if (ret < 0) {
			__io_remove_buffers(ctx, head, p->bgid, -1U);
			goto out;
		}
	}
out:
	if (ret < 0)
		req_set_fail_links(req);

	/* need to hold the lock to complete IOPOLL requests */
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		__io_req_complete(req, issue_flags, ret, 0);
		io_ring_submit_unlock(ctx, !force_nonblock);
	} else {
		io_ring_submit_unlock(ctx, !force_nonblock);
		__io_req_complete(req, issue_flags, ret, 0);
	}
	return 0;
}

static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->statx.dfd = READ_ONCE(sqe->fd);
	req->statx.mask = READ_ONCE(sqe->len);
	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->statx.flags = READ_ONCE(sqe->statx_flags);

	return 0;
}

static int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_statx *ctx = &req->statx;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		/* only need file table for an actual valid fd */
		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
			req->flags |= REQ_F_NO_FILE_TABLE;
		return -EAGAIN;
	}

	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
		       ctx->buffer);

	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	return 0;
}

static int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = &req->close;
	struct fdtable *fdt;
	struct file *file;
	int ret;

	file = NULL;
	ret = -EBADF;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = fdt->fd[close->fd];
	if (!file) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	if (file->f_op == &io_uring_fops) {
		spin_unlock(&files->file_lock);
		file = NULL;
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	ret = __close_fd_get_file(close->fd, &file);
	spin_unlock(&files->file_lock);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = -EBADF;
		goto err;
	}

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail_links(req);
	if (file)
		fput(file);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}

static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* sync_file_range always requires a blocking context */
	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

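/*
 * Networking opcodes. When CONFIG_NET is not set, the stubs at the end of
 * this section simply return -EOPNOTSUPP.
 */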
#if defined(CONFIG_NET)
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	if (io_alloc_async_data(req)) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	async_msg = req->async_data;
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
				   req->sr_msg.msg_flags, &iomsg->free_iov);
}

static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_async_msghdr *async_msg = req->async_data;
	struct io_sr_msg *sr = &req->sr_msg;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
		return 0;
	ret = io_sendmsg_copy_hdr(req, async_msg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
					&iomsg->uaddr, &uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len > 1)
			return -EINVAL;
		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
			return -EFAULT;
		sr->len = iomsg->fast_iov[0].iov_len;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct compat_msghdr __user *msg_compat;
	struct io_sr_msg *sr = &req->sr_msg;
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	msg_compat = (struct compat_msghdr __user *) sr->umsg;
	ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
					&ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
					       bool needs_lock)
{
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_buffer *kbuf;

	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
	if (IS_ERR(kbuf))
		return kbuf;

	sr->kbuf = kbuf;
	req->flags |= REQ_F_BUFFER_SELECTED;
	return kbuf;
}

static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
{
	return io_put_kbuf(req, req->sr_msg.kbuf);
}

static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	struct io_async_msghdr *async_msg = req->async_data;
	struct io_sr_msg *sr = &req->sr_msg;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->bgid = READ_ONCE(sqe->buf_group);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
		return 0;
	ret = io_recvmsg_copy_hdr(req, async_msg);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	struct io_buffer *kbuf;
	unsigned flags;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	kmsg = req->async_data;
	if (!kmsg) {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
		kmsg->fast_iov[0].iov_len = req->sr_msg.len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov,
				1, req->sr_msg.len);
	}

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
					kmsg->uaddr, flags);
	if (force_nonblock && ret == -EAGAIN)
		return io_setup_async_msg(req, kmsg);
	if (ret == -ERESTARTSYS)
		ret = -EINTR;

	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}

static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_buffer *kbuf;
	struct io_sr_msg *sr = &req->sr_msg;
	struct msghdr msg;
	void __user *buf = sr->buf;
	struct socket *sock;
	struct iovec iov;
	unsigned flags;
	int ret, cflags = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		kbuf = io_recv_buffer_select(req, !force_nonblock);
		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		buf = u64_to_user_ptr(kbuf->addr);
	}

	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_iocb = NULL;
	msg.msg_flags = 0;

	flags = req->sr_msg.msg_flags;
	if (flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	else if (force_nonblock)
		flags |= MSG_DONTWAIT;

	ret = sock_recvmsg(sock, &msg, flags);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out_free:
	if (req->flags & REQ_F_BUFFER_SELECTED)
		cflags = io_put_recv_kbuf(req);
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, cflags);
	return 0;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = &req->accept;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	int ret;

	if (req->file->f_flags & O_NONBLOCK)
		req->flags |= REQ_F_NOWAIT;

	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
					accept->addr_len, accept->flags,
					accept->nofile);
	if (ret == -EAGAIN && force_nonblock)
		return -EAGAIN;
	if (ret < 0) {
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail_links(req);
	}
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = &req->connect;
	struct io_async_connect *io = req->async_data;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);

	if (!io)
		return 0;

	return move_addr_to_kernel(conn->addr, conn->addr_len,
					&io->address);
}

static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req->async_data) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(req->connect.addr,
						req->connect.addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					req->connect.addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req->async_data)
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		io = req->async_data;
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}
#else /* !CONFIG_NET */
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_recvmsg_prep(struct io_kiocb *req,
			   const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}

static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return -EOPNOTSUPP;
}

static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_NET */

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int error;
};

static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
			   __poll_t mask, task_work_func_t func)
{
	int ret;

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);

	list_del_init(&poll->wait.entry);

	req->result = mask;
	req->task_work.func = func;
	percpu_ref_get(&req->ctx->refs);

	/*
	 * If this fails, then the task is exiting. When a task exits, the
	 * work gets canceled, so just cancel this request as well instead
	 * of executing it. We can't safely execute it anyway, as we may not
	 * have the state needed for it.
	 */
	ret = io_req_task_work_add(req);
	if (unlikely(ret)) {
		WRITE_ONCE(poll->canceled, true);
		io_req_task_work_add_fallback(req, func);
	}
	return 1;
}

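/*
 * Re-check readiness before completing a poll request: if no result was
 * recorded and the poll wasn't canceled, re-poll the file and, still under
 * the completion lock, re-add the wait entry and tell the caller to bail.
 */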
static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
	__acquires(&req->ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->result && !READ_ONCE(poll->canceled)) {
		struct poll_table_struct pt = { ._key = poll->events };

		req->result = vfs_poll(req->file, &pt) & poll->events;
	}

	spin_lock_irq(&ctx->completion_lock);
	if (!req->result && !READ_ONCE(poll->canceled)) {
		add_wait_queue(poll->head, &poll->wait);
		return true;
	}

	return false;
}

static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return &req->poll;
	return &req->apoll->poll;
}

static void io_poll_remove_double(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = io_poll_get_double(req);

	lockdep_assert_held(&req->ctx->completion_lock);

	if (poll && poll->head) {
		struct wait_queue_head *head = poll->head;

		spin_lock(&head->lock);
		list_del_init(&poll->wait.entry);
		if (poll->wait.private)
			refcount_dec(&req->refs);
		poll->head = NULL;
		spin_unlock(&head->lock);
	}
}

static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_poll_remove_double(req);
	req->poll.done = true;
	io_cqring_fill_event(req, error ? error : mangle_poll(mask));
	io_commit_cqring(ctx);
}

static void io_poll_task_func(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *nxt;

	if (io_poll_rewait(req, &req->poll)) {
		spin_unlock_irq(&ctx->completion_lock);
	} else {
		hash_del(&req->hash_node);
		io_poll_complete(req, req->result, 0);
		spin_unlock_irq(&ctx->completion_lock);

		nxt = io_put_req_find_next(req);
		io_cqring_ev_posted(ctx);
		if (nxt)
			__io_req_task_submit(nxt);
	}

	percpu_ref_put(&ctx->refs);
}

static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
			       int sync, void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = io_poll_get_single(req);
	__poll_t mask = key_to_poll(key);

	/* for instances that support it check for an event match first: */
	if (mask && !(mask & poll->events))
		return 0;

	list_del_init(&wait->entry);

	if (poll && poll->head) {
		bool done;

		spin_lock(&poll->head->lock);
		done = list_empty(&poll->wait.entry);
		if (!done)
			list_del_init(&poll->wait.entry);
		/* make sure double remove sees this as being gone */
		wait->private = NULL;
		spin_unlock(&poll->head->lock);
		if (!done) {
			/* use wait func handler, so it matches the rq type */
			poll->wait.func(&poll->wait, mode, sync, key);
		}
	}
	refcount_dec(&req->refs);
	return 1;
}

static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
	poll->done = false;
	poll->canceled = false;
	poll->events = events;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll_iocb **poll_ptr)
{
	struct io_kiocb *req = pt->req;

	/*
	 * If poll->head is already set, it's because the file being polled
	 * uses multiple waitqueues for poll handling (eg one for read, one
	 * for write). Setup a separate io_poll_iocb if this happens.
	 */
	if (unlikely(poll->head)) {
		struct io_poll_iocb *poll_one = poll;

		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			pt->error = -EINVAL;
			return;
		}
		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
		refcount_inc(&req->refs);
		poll->wait.private = req;
		*poll_ptr = poll;
	}

	pt->error = 0;
	poll->head = head;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

static void io_async_task_func(struct callback_head *cb)
{
	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
	struct async_poll *apoll = req->apoll;
	struct io_ring_ctx *ctx = req->ctx;

	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);

	if (io_poll_rewait(req, &apoll->poll)) {
		spin_unlock_irq(&ctx->completion_lock);
		percpu_ref_put(&ctx->refs);
		return;
	}

	/* If req is still hashed, it cannot have been canceled. Don't check. */
	if (hash_hashed(&req->hash_node))
		hash_del(&req->hash_node);

	io_poll_remove_double(req);
	spin_unlock_irq(&ctx->completion_lock);

	if (!READ_ONCE(apoll->poll.canceled))
		__io_req_task_submit(req);
	else
		__io_req_task_cancel(req, -ECANCELED);

	percpu_ref_put(&ctx->refs);
	kfree(apoll->double_poll);
	kfree(apoll);
}

static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = &req->apoll->poll;

	trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
					key_to_poll(key));

	return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
}

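/* Hash the request by its user_data so poll cancelation can find it later */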
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct hlist_head *list;

	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
	hlist_add_head(&req->hash_node, list);
}

static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
				      struct io_poll_iocb *poll,
				      struct io_poll_table *ipt, __poll_t mask,
				      wait_queue_func_t wake_func)
	__acquires(&ctx->completion_lock)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool cancel = false;


	INIT_HLIST_NODE(&req->hash_node);
	io_init_poll_iocb(poll, mask, wake_func);
	poll->file = req->file;
	poll->wait.private = req;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = -EINVAL;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	spin_lock_irq(&ctx->completion_lock);
	if (likely(poll->head)) {
		spin_lock(&poll->head->lock);
		if (unlikely(list_empty(&poll->wait.entry))) {
			if (ipt->error)
				cancel = true;
			ipt->error = 0;
			mask = 0;
		}
		if (mask || ipt->error)
			list_del_init(&poll->wait.entry);
		else if (cancel)
			WRITE_ONCE(poll->canceled, true);
		else if (!poll->done) /* actually waiting for an event */
			io_poll_req_insert(req);
		spin_unlock(&poll->head->lock);
	}

	return mask;
}

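/*
 * Arm an internal poll handler for a pollable file: wait for POLLIN/POLLOUT
 * (per the opcode's pollin/pollout definition) and re-issue the request via
 * task work (io_async_task_func) once the file becomes ready.
 */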
static bool io_arm_poll_handler(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask, ret;
5493
	int rw;
5494 5495 5496

	if (!req->file || !file_can_poll(req->file))
		return false;
5497
	if (req->flags & REQ_F_POLLED)
5498
		return false;
5499 5500 5501 5502 5503 5504 5505 5506
	if (def->pollin)
		rw = READ;
	else if (def->pollout)
		rw = WRITE;
	else
		return false;
	/* if we can't nonblock try, then no point in arming a poll handler */
	if (!io_file_supports_async(req->file, rw))
5507 5508 5509 5510 5511
		return false;

	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
	if (unlikely(!apoll))
		return false;
5512
	apoll->double_poll = NULL;
5513 5514 5515 5516

	req->flags |= REQ_F_POLLED;
	req->apoll = apoll;

5517
	mask = 0;
5518
	if (def->pollin)
5519
		mask |= POLLIN | POLLRDNORM;
5520 5521
	if (def->pollout)
		mask |= POLLOUT | POLLWRNORM;
5522 5523 5524 5525 5526 5527

	/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
	if ((req->opcode == IORING_OP_RECVMSG) &&
	    (req->sr_msg.msg_flags & MSG_ERRQUEUE))
		mask &= ~POLLIN;

	mask |= POLLERR | POLLPRI;

	ipt.pt._qproc = io_async_queue_proc;

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
					io_async_wake);
	if (ret || ipt.error) {
		io_poll_remove_double(req);
		spin_unlock_irq(&ctx->completion_lock);
		kfree(apoll->double_poll);
		kfree(apoll);
		return false;
	}
	spin_unlock_irq(&ctx->completion_lock);
	trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
					apoll->poll.events);
	return true;
}

static bool __io_poll_remove_one(struct io_kiocb *req,
				 struct io_poll_iocb *poll)
{
	bool do_complete = false;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		do_complete = true;
	}
	spin_unlock(&poll->head->lock);
	hash_del(&req->hash_node);
	return do_complete;
}

static bool io_poll_remove_one(struct io_kiocb *req)
{
	bool do_complete;

	io_poll_remove_double(req);

	if (req->opcode == IORING_OP_POLL_ADD) {
		do_complete = __io_poll_remove_one(req, &req->poll);
	} else {
		struct async_poll *apoll = req->apoll;

		/* non-poll requests have submit ref still */
		do_complete = __io_poll_remove_one(req, &apoll->poll);
		if (do_complete) {
			io_put_req(req);
			kfree(apoll->double_poll);
			kfree(apoll);
		}
	}

	if (do_complete) {
		io_cqring_fill_event(req, -ECANCELED);
		io_commit_cqring(req->ctx);
		req_set_fail_links(req);
		io_put_req_deferred(req, 1);
	}

	return do_complete;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       struct files_struct *files)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int posted = 0, i;

	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list;

		list = &ctx->cancel_hash[i];
		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
			if (io_match_task(req, tsk, files))
				posted += io_poll_remove_one(req);
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (posted)
		io_cqring_ev_posted(ctx);

	return posted != 0;
}

static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr != req->user_data)
			continue;
		if (io_poll_remove_one(req))
			return 0;
		return -EALREADY;
	}

	return -ENOENT;
}

static int io_poll_remove_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	req->poll_remove.addr = READ_ONCE(sqe->addr);
	return 0;
}

/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->completion_lock);
	ret = io_poll_cancel(ctx, req->poll_remove.addr);
	spin_unlock_irq(&ctx->completion_lock);

	if (ret < 0)
		req_set_fail_links(req);
	io_req_complete(req, ret);
	return 0;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wait->private;
	struct io_poll_iocb *poll = &req->poll;

	return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);

	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
}

static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_iocb *poll = &req->poll;
	u32 events;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
		       (events & EPOLLEXCLUSIVE);
	return 0;
}

static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_iocb *poll = &req->poll;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_poll_table ipt;
	__poll_t mask;

	ipt.pt._qproc = io_poll_queue_proc;

	mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
					io_poll_wake);

	if (mask) { /* no async, we'd stolen it */
		ipt.error = 0;
		io_poll_complete(req, mask, 0);
	}
	spin_unlock_irq(&ctx->completion_lock);

	if (mask) {
		io_cqring_ev_posted(ctx);
		io_put_req(req);
	}
	return ipt.error;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	list_del_init(&req->timeout.list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);

	io_cqring_fill_event(req, -ETIME);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	req_set_fail_links(req);
	io_put_req(req);
	return HRTIMER_NORESTART;
}

static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   __u64 user_data)
{
	struct io_timeout_data *io;
	struct io_kiocb *req;
	int ret = -ENOENT;

	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
		if (user_data == req->user_data) {
			ret = 0;
			break;
		}
	}

	if (ret == -ENOENT)
		return ERR_PTR(ret);

	io = req->async_data;
	ret = hrtimer_try_to_cancel(&io->timer);
	if (ret == -1)
		return ERR_PTR(-EALREADY);
	list_del_init(&req->timeout.list);
	return req;
}

static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
{
	struct io_kiocb *req = io_timeout_extract(ctx, user_data);

	if (IS_ERR(req))
		return PTR_ERR(req);

	req_set_fail_links(req);
	io_cqring_fill_event(req, -ECANCELED);
	io_put_req_deferred(req, 1);
	return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
{
	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout.off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&req->timeout.list, &ctx->timeout_list);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_remove_prep(struct io_kiocb *req,
				  const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = &req->timeout_rem;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len)
		return -EINVAL;

	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE) {
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = &req->timeout_rem;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	spin_lock_irq(&ctx->completion_lock);
	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
		ret = io_timeout_cancel(ctx, tr->addr);
	else
		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
					io_translate_timeout_mode(tr->flags));

	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}

static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   bool is_timeout_link)
{
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~IORING_TIMEOUT_ABS)
		return -EINVAL;

	req->timeout.off = off;

	if (!req->async_data && io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	return 0;
}

static int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = req->timeout.off;

	spin_lock_irq(&ctx->completion_lock);

	/*
	 * sqe->off holds how many events need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	req->timeout.target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
						  timeout.list);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nxt->timeout.target_seq - tail)
			break;
	}
add:
	list_add(&req->timeout.list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->completion_lock);
	return 0;
}

static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->user_data == (unsigned long) data;
}

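/* Ask io-wq to cancel a matching work item and map the result to an errno. */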
static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;

	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
				     struct io_kiocb *req, __u64 sqe_addr,
				     int success_ret)
{
	unsigned long flags;
	int ret;

	ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
	if (ret != -ENOENT) {
		spin_lock_irqsave(&ctx->completion_lock, flags);
		goto done;
	}

	spin_lock_irqsave(&ctx->completion_lock, flags);
	ret = io_timeout_cancel(ctx, sqe_addr);
	if (ret != -ENOENT)
		goto done;
	ret = io_poll_cancel(ctx, sqe_addr);
done:
	if (!ret)
		ret = success_ret;
	io_cqring_fill_event(req, ret);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);

	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
}

static int io_async_cancel_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
		return -EINVAL;

	req->cancel.addr = READ_ONCE(sqe->addr);
	return 0;
}

static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
	return 0;
}

static int io_rsrc_update_prep(struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
		return -EINVAL;
	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->ioprio || sqe->rw_flags)
		return -EINVAL;

	req->rsrc_update.offset = READ_ONCE(sqe->off);
	req->rsrc_update.nr_args = READ_ONCE(sqe->len);
	if (!req->rsrc_update.nr_args)
		return -EINVAL;
	req->rsrc_update.arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update up;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	up.offset = req->rsrc_update.offset;
	up.data = req->rsrc_update.arg;

	mutex_lock(&ctx->uring_lock);
	ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
	mutex_unlock(&ctx->uring_lock);

	if (ret < 0)
		req_set_fail_links(req);
	__io_req_complete(req, issue_flags, ret, 0);
	return 0;
}

static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	switch (req->opcode) {
	case IORING_OP_NOP:
		return 0;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		return io_read_prep(req, sqe);
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		return io_write_prep(req, sqe);
	case IORING_OP_POLL_ADD:
		return io_poll_add_prep(req, sqe);
	case IORING_OP_POLL_REMOVE:
		return io_poll_remove_prep(req, sqe);
	case IORING_OP_FSYNC:
		return io_fsync_prep(req, sqe);
	case IORING_OP_SYNC_FILE_RANGE:
		return io_sfr_prep(req, sqe);
	case IORING_OP_SENDMSG:
	case IORING_OP_SEND:
		return io_sendmsg_prep(req, sqe);
	case IORING_OP_RECVMSG:
	case IORING_OP_RECV:
		return io_recvmsg_prep(req, sqe);
	case IORING_OP_CONNECT:
		return io_connect_prep(req, sqe);
	case IORING_OP_TIMEOUT:
		return io_timeout_prep(req, sqe, false);
	case IORING_OP_TIMEOUT_REMOVE:
		return io_timeout_remove_prep(req, sqe);
	case IORING_OP_ASYNC_CANCEL:
		return io_async_cancel_prep(req, sqe);
	case IORING_OP_LINK_TIMEOUT:
		return io_timeout_prep(req, sqe, true);
	case IORING_OP_ACCEPT:
		return io_accept_prep(req, sqe);
	case IORING_OP_FALLOCATE:
		return io_fallocate_prep(req, sqe);
	case IORING_OP_OPENAT:
		return io_openat_prep(req, sqe);
	case IORING_OP_CLOSE:
		return io_close_prep(req, sqe);
	case IORING_OP_FILES_UPDATE:
		return io_rsrc_update_prep(req, sqe);
	case IORING_OP_STATX:
		return io_statx_prep(req, sqe);
	case IORING_OP_FADVISE:
		return io_fadvise_prep(req, sqe);
	case IORING_OP_MADVISE:
		return io_madvise_prep(req, sqe);
	case IORING_OP_OPENAT2:
		return io_openat2_prep(req, sqe);
	case IORING_OP_EPOLL_CTL:
		return io_epoll_ctl_prep(req, sqe);
	case IORING_OP_SPLICE:
		return io_splice_prep(req, sqe);
	case IORING_OP_PROVIDE_BUFFERS:
		return io_provide_buffers_prep(req, sqe);
	case IORING_OP_REMOVE_BUFFERS:
		return io_remove_buffers_prep(req, sqe);
	case IORING_OP_TEE:
		return io_tee_prep(req, sqe);
	case IORING_OP_SHUTDOWN:
		return io_shutdown_prep(req, sqe);
	case IORING_OP_RENAMEAT:
		return io_renameat_prep(req, sqe);
	case IORING_OP_UNLINKAT:
		return io_unlinkat_prep(req, sqe);
	}

	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
			req->opcode);
	return -EINVAL;
}

static int io_req_defer_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (!sqe)
		return 0;
	if (io_alloc_async_data(req))
		return -EAGAIN;
	return io_req_prep(req, sqe);
}

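/*
 * Drain sequencing: a request's sequence is the number of SQEs submitted
 * before it, not counting the other requests in its own link chain.
 */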
static u32 io_get_sequence(struct io_kiocb *req)
{
	struct io_kiocb *pos;
	struct io_ring_ctx *ctx = req->ctx;
	u32 total_submitted, nr_reqs = 0;

	io_for_each_link(pos, req)
		nr_reqs++;

	total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
	return total_submitted - nr_reqs;
}

static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_defer_entry *de;
	int ret;
	u32 seq;

	/* Still need defer if there is pending req in defer list. */
	if (likely(list_empty_careful(&ctx->defer_list) &&
		!(req->flags & REQ_F_IO_DRAIN)))
		return 0;

	seq = io_get_sequence(req);
	/* Still a chance to pass the sequence check */
	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
		return 0;

	if (!req->async_data) {
		ret = io_req_defer_prep(req, sqe);
		if (ret)
			return ret;
	}
	io_prep_async_link(req);
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de)
		return -ENOMEM;

	spin_lock_irq(&ctx->completion_lock);
	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
		spin_unlock_irq(&ctx->completion_lock);
		kfree(de);
		io_queue_async_work(req);
		return -EIOCBQUEUED;
	}

	trace_io_uring_defer(ctx, req, req->user_data);
	de->req = req;
	de->seq = seq;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}

static void __io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		switch (req->opcode) {
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_READ:
			kfree((void *)(unsigned long)req->rw.addr);
			break;
		case IORING_OP_RECVMSG:
		case IORING_OP_RECV:
			kfree(req->sr_msg.kbuf);
			break;
		}
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		switch (req->opcode) {
		case IORING_OP_READV:
		case IORING_OP_READ_FIXED:
		case IORING_OP_READ:
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
		case IORING_OP_WRITE: {
			struct io_async_rw *io = req->async_data;

			if (io->free_iovec)
				kfree(io->free_iovec);
			break;
			}
		case IORING_OP_RECVMSG:
		case IORING_OP_SENDMSG: {
			struct io_async_msghdr *io = req->async_data;

			kfree(io->free_iov);
			break;
			}
		case IORING_OP_SPLICE:
		case IORING_OP_TEE:
			io_put_file(req, req->splice.file_in,
				    (req->splice.flags & SPLICE_F_FD_IN_FIXED));
			break;
		case IORING_OP_OPENAT:
		case IORING_OP_OPENAT2:
			if (req->open.filename)
				putname(req->open.filename);
			break;
		case IORING_OP_RENAMEAT:
			putname(req->rename.oldpath);
			putname(req->rename.newpath);
			break;
		case IORING_OP_UNLINKAT:
			putname(req->unlink.filename);
			break;
		}
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
}

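/*
 * Issue a fully prepared request. issue_flags tells the opcode handler
 * whether it may block and whether completions may be deferred for batching.
 */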
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	switch (req->opcode) {
	case IORING_OP_NOP:
		ret = io_nop(req, issue_flags);
		break;
	case IORING_OP_READV:
	case IORING_OP_READ_FIXED:
	case IORING_OP_READ:
		ret = io_read(req, issue_flags);
		break;
	case IORING_OP_WRITEV:
	case IORING_OP_WRITE_FIXED:
	case IORING_OP_WRITE:
		ret = io_write(req, issue_flags);
		break;
	case IORING_OP_FSYNC:
		ret = io_fsync(req, issue_flags);
		break;
	case IORING_OP_POLL_ADD:
		ret = io_poll_add(req, issue_flags);
		break;
	case IORING_OP_POLL_REMOVE:
		ret = io_poll_remove(req, issue_flags);
		break;
	case IORING_OP_SYNC_FILE_RANGE:
		ret = io_sync_file_range(req, issue_flags);
		break;
	case IORING_OP_SENDMSG:
		ret = io_sendmsg(req, issue_flags);
		break;
	case IORING_OP_SEND:
		ret = io_send(req, issue_flags);
		break;
	case IORING_OP_RECVMSG:
		ret = io_recvmsg(req, issue_flags);
		break;
	case IORING_OP_RECV:
		ret = io_recv(req, issue_flags);
		break;
	case IORING_OP_TIMEOUT:
		ret = io_timeout(req, issue_flags);
		break;
	case IORING_OP_TIMEOUT_REMOVE:
		ret = io_timeout_remove(req, issue_flags);
		break;
	case IORING_OP_ACCEPT:
		ret = io_accept(req, issue_flags);
		break;
	case IORING_OP_CONNECT:
		ret = io_connect(req, issue_flags);
		break;
	case IORING_OP_ASYNC_CANCEL:
		ret = io_async_cancel(req, issue_flags);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate(req, issue_flags);
		break;
	case IORING_OP_OPENAT:
		ret = io_openat(req, issue_flags);
		break;
	case IORING_OP_CLOSE:
		ret = io_close(req, issue_flags);
		break;
	case IORING_OP_FILES_UPDATE:
		ret = io_files_update(req, issue_flags);
		break;
	case IORING_OP_STATX:
		ret = io_statx(req, issue_flags);
		break;
	case IORING_OP_FADVISE:
		ret = io_fadvise(req, issue_flags);
		break;
	case IORING_OP_MADVISE:
		ret = io_madvise(req, issue_flags);
		break;
	case IORING_OP_OPENAT2:
		ret = io_openat2(req, issue_flags);
		break;
	case IORING_OP_EPOLL_CTL:
		ret = io_epoll_ctl(req, issue_flags);
		break;
	case IORING_OP_SPLICE:
		ret = io_splice(req, issue_flags);
		break;
	case IORING_OP_PROVIDE_BUFFERS:
		ret = io_provide_buffers(req, issue_flags);
		break;
	case IORING_OP_REMOVE_BUFFERS:
		ret = io_remove_buffers(req, issue_flags);
		break;
	case IORING_OP_TEE:
		ret = io_tee(req, issue_flags);
		break;
	case IORING_OP_SHUTDOWN:
		ret = io_shutdown(req, issue_flags);
		break;
	case IORING_OP_RENAMEAT:
		ret = io_renameat(req, issue_flags);
		break;
	case IORING_OP_UNLINKAT:
		ret = io_unlinkat(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	/* If the op doesn't have a file, we're not polling for it */
	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
		const bool in_async = io_wq_current_is_worker();

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (in_async)
			mutex_lock(&ctx->uring_lock);

		io_iopoll_req_issued(req, in_async);

		if (in_async)
			mutex_unlock(&ctx->uring_lock);
	}

	return 0;

6407
static void io_wq_submit_work(struct io_wq_work *work)
J
Jens Axboe 已提交
6408 6409
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6410
	struct io_kiocb *timeout;
6411
	int ret = 0;
J
Jens Axboe 已提交
6412

6413 6414 6415
	timeout = io_prep_linked_timeout(req);
	if (timeout)
		io_queue_linked_timeout(timeout);
6416

6417
	if (work->flags & IO_WQ_WORK_CANCEL)
6418
		ret = -ECANCELED;
6419

6420 6421
	if (!ret) {
		do {
6422
			ret = io_issue_sqe(req, 0);
6423 6424 6425 6426 6427 6428 6429 6430 6431 6432
			/*
			 * We can get EAGAIN for polled IO even though we're
			 * forcing a sync submission from here, since we can't
			 * wait for request slots on the block side.
			 */
			if (ret != -EAGAIN)
				break;
			cond_resched();
		} while (1);
	}
6433

6434
	if (ret) {
6435 6436 6437 6438 6439
		struct io_ring_ctx *lock_ctx = NULL;

		if (req->ctx->flags & IORING_SETUP_IOPOLL)
			lock_ctx = req->ctx;

6440
		/*
6441 6442 6443 6444 6445 6446 6447
		 * io_iopoll_complete() does not hold completion_lock to
		 * complete polled io, so here for polled io, we can not call
		 * io_req_complete() directly, otherwise there maybe concurrent
		 * access to cqring, defer_list, etc, which is not safe. Given
		 * that io_iopoll_complete() is always called under uring_lock,
		 * so here for polled io, we also get uring_lock to complete
		 * it.
6448
		 */
6449 6450
		if (lock_ctx)
			mutex_lock(&lock_ctx->uring_lock);
6451

6452 6453 6454 6455 6456
		req_set_fail_links(req);
		io_req_complete(req, ret);

		if (lock_ctx)
			mutex_unlock(&lock_ctx->uring_lock);
6457
	}
J
Jens Axboe 已提交
6458 6459
}

6460 6461 6462
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
					      int index)
{
6463
	struct fixed_rsrc_table *table;
6464

6465
	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
6466
	return table->files[index & IORING_FILE_TABLE_MASK];
6467 6468
}

P
static struct file *io_file_get(struct io_submit_state *state,
				struct io_kiocb *req, int fd, bool fixed)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file;

	if (fixed) {
		if (unlikely((unsigned int)fd >= ctx->nr_user_files))
			return NULL;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		file = io_file_from_index(ctx, fd);
		io_set_resource_node(req);
	} else {
		trace_io_uring_file_get(ctx, fd);
		file = __io_file_get(state, fd);
	}

	if (file && unlikely(file->f_op == &io_uring_fops))
		io_req_track_inflight(req);
	return file;

6491
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
J
Jens Axboe 已提交
6492
{
6493 6494
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
6495
	struct io_kiocb *prev, *req = data->req;
6496 6497 6498 6499
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
6500 6501
	prev = req->timeout.head;
	req->timeout.head = NULL;
6502 6503 6504 6505 6506

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
6507
	if (prev && refcount_inc_not_zero(&prev->refs))
6508
		io_remove_next_linked(prev);
6509 6510
	else
		prev = NULL;
6511 6512 6513
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (prev) {
J
Jens Axboe 已提交
6514
		req_set_fail_links(prev);
6515
		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
6516
		io_put_req_deferred(prev, 1);
6517
	} else {
6518 6519
		io_req_complete_post(req, -ETIME, 0);
		io_put_req_deferred(req, 1);
6520 6521 6522 6523
	}
	return HRTIMER_NORESTART;
}

6524
static void __io_queue_linked_timeout(struct io_kiocb *req)
6525
{
6526
	/*
6527 6528
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to setup the timer
6529
	 */
6530
	if (req->timeout.head) {
6531
		struct io_timeout_data *data = req->async_data;
6532

6533 6534 6535
		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
6536
	}
6537 6538 6539 6540 6541 6542 6543 6544
}

static void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->completion_lock);
	__io_queue_linked_timeout(req);
6545
	spin_unlock_irq(&ctx->completion_lock);
6546 6547

	/* drop submission reference */
6548 6549
	io_put_req(req);
}
6550

6551
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
6552
{
6553
	struct io_kiocb *nxt = req->link;
6554

6555 6556
	if (!nxt || (req->flags & REQ_F_LINK_TIMEOUT) ||
	    nxt->opcode != IORING_OP_LINK_TIMEOUT)
6557
		return NULL;
6558

6559
	nxt->timeout.head = req;
6560
	nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
6561 6562
	req->flags |= REQ_F_LINK_TIMEOUT;
	return nxt;
6563 6564
}

6565
static void __io_queue_sqe(struct io_kiocb *req)
J
Jens Axboe 已提交
6566
{
6567
	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
6568
	const struct cred *old_creds = NULL;
6569
	int ret;
J
Jens Axboe 已提交
6570

6571 6572
	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
	    (req->work.flags & IO_WQ_WORK_CREDS) &&
6573 6574
	    req->work.identity->creds != current_cred())
		old_creds = override_creds(req->work.identity->creds);
6575

6576
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
6577

6578 6579 6580
	if (old_creds)
		revert_creds(old_creds);

6581 6582 6583 6584
	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 */
6585
	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6586 6587 6588 6589 6590 6591
		if (!io_arm_poll_handler(req)) {
			/*
			 * Queued up for async execution, worker will release
			 * submit reference when the iocb is actually submitted.
			 */
			io_queue_async_work(req);
J
Jens Axboe 已提交
6592
		}
6593 6594
	} else if (likely(!ret)) {
		/* drop submission reference */
6595
		if (req->flags & REQ_F_COMPLETE_INLINE) {
6596 6597 6598
			struct io_ring_ctx *ctx = req->ctx;
			struct io_comp_state *cs = &ctx->submit_state.comp;

6599
			cs->reqs[cs->nr++] = req;
6600
			if (cs->nr == ARRAY_SIZE(cs->reqs))
6601
				io_submit_flush_completions(cs, ctx);
6602
		} else {
6603
			io_put_req(req);
6604 6605
		}
	} else {
J
Jens Axboe 已提交
6606
		req_set_fail_links(req);
6607
		io_put_req(req);
6608
		io_req_complete(req, ret);
J
Jens Axboe 已提交
6609
	}
6610 6611
	if (linked_timeout)
		io_queue_linked_timeout(linked_timeout);
J
Jens Axboe 已提交
6612 6613
}

static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	int ret;

	ret = io_req_defer(req, sqe);
	if (ret) {
		if (ret != -EIOCBQUEUED) {
fail_req:
			req_set_fail_links(req);
			io_put_req(req);
			io_req_complete(req, ret);
		}
	} else if (req->flags & REQ_F_FORCE_ASYNC) {
		if (!req->async_data) {
			ret = io_req_defer_prep(req, sqe);
			if (unlikely(ret))
				goto fail_req;
		}
		io_queue_async_work(req);
	} else {
		if (sqe) {
			ret = io_req_prep(req, sqe);
			if (unlikely(ret))
				goto fail_req;
		}
		__io_queue_sqe(req);
	}
}

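/* Queue the head of a link chain, cancelling it if prep already failed it. */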
static inline void io_queue_link_head(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
		io_put_req(req);
		io_req_complete(req, -ECANCELED);
	} else
		io_queue_sqe(req, NULL);
}

/*
 * Check SQE restrictions (opcode and flags).
 *
 * Returns 'true' if SQE is allowed, 'false' otherwise.
 */
static inline bool io_check_restriction(struct io_ring_ctx *ctx,
					struct io_kiocb *req,
					unsigned int sqe_flags)
{
	if (!ctx->restricted)
		return true;

	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
		return false;

	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
	    ctx->restrictions.sqe_flags_required)
		return false;

	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
			  ctx->restrictions.sqe_flags_required))
		return false;

	return true;
}

static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
		       const struct io_uring_sqe *sqe)
{
	struct io_submit_state *state;
	unsigned int sqe_flags;
	int id, ret = 0;

	req->opcode = READ_ONCE(sqe->opcode);
	/* same numerical values with corresponding REQ_F_*, safe to copy */
	req->flags = sqe_flags = READ_ONCE(sqe->flags);
	req->user_data = READ_ONCE(sqe->user_data);
	req->async_data = NULL;
	req->file = NULL;
	req->ctx = ctx;
	req->link = NULL;
	req->fixed_rsrc_refs = NULL;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->task = current;
	req->result = 0;

	/* enforce forwards compatibility on users */
	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
		return -EINVAL;

	if (unlikely(req->opcode >= IORING_OP_LAST))
		return -EINVAL;

	if (unlikely(io_sq_thread_acquire_mm_files(ctx, req)))
		return -EFAULT;

	if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
		return -EACCES;

	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
	    !io_op_defs[req->opcode].buffer_select)
		return -EOPNOTSUPP;

	id = READ_ONCE(sqe->personality);
	if (id) {
		struct io_identity *iod;

		iod = idr_find(&ctx->personality_idr, id);
		if (unlikely(!iod))
			return -EINVAL;
		refcount_inc(&iod->count);

		__io_req_init_async(req);
		get_cred(iod->creds);
		req->work.identity = iod;
		req->work.flags |= IO_WQ_WORK_CREDS;
	}

	state = &ctx->submit_state;

	/*
	 * Plug now if we have more than 1 IO left after this, and the target
	 * is potentially a read/write to block based storage.
	 */
	if (!state->plug_started && state->ios_left > 1 &&
	    io_op_defs[req->opcode].plug) {
		blk_start_plug(&state->plug);
		state->plug_started = true;
	}

	if (io_op_defs[req->opcode].needs_file) {
		bool fixed = req->flags & REQ_F_FIXED_FILE;

		req->file = io_file_get(state, req, READ_ONCE(sqe->fd), fixed);
		if (unlikely(!req->file))
			ret = -EBADF;
	}

	state->ios_left--;
	return ret;
}

static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
			 const struct io_uring_sqe *sqe)
{
	struct io_submit_link *link = &ctx->submit_state.link;
	int ret;

	ret = io_init_req(ctx, req, sqe);
	if (unlikely(ret)) {
fail_req:
		io_put_req(req);
		io_req_complete(req, ret);
		/* fail even hard links since we don't submit */
		if (link->head)
			link->head->flags |= REQ_F_FAIL_LINK;
		return ret;
	}

	trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
				true, ctx->flags & IORING_SETUP_SQPOLL);

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (link->head) {
		struct io_kiocb *head = link->head;

		/*
		 * Taking sequential execution of a link, draining both sides
		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
		 * requests in the link. So, it drains the head and the
		 * next after the link request. The last one is done via
		 * drain_next flag to persist the effect across calls.
		 */
		if (req->flags & REQ_F_IO_DRAIN) {
			head->flags |= REQ_F_IO_DRAIN;
			ctx->drain_next = 1;
		}
		ret = io_req_defer_prep(req, sqe);
		if (unlikely(ret))
			goto fail_req;
		trace_io_uring_link(ctx, req, head);
		link->last->link = req;
		link->last = req;

		/* last request of a link, enqueue the link */
		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
			io_queue_link_head(head);
			link->head = NULL;
		}
	} else {
		if (unlikely(ctx->drain_next)) {
			req->flags |= REQ_F_IO_DRAIN;
			ctx->drain_next = 0;
		}
		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			ret = io_req_defer_prep(req, sqe);
			if (unlikely(ret))
				req->flags |= REQ_F_FAIL_LINK;
			link->head = req;
			link->last = req;
		} else {
			io_queue_sqe(req, sqe);
		}
	}

	return 0;
}

/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_submit_state *state,
				struct io_ring_ctx *ctx)
{
	if (state->link.head)
		io_queue_link_head(state->link.head);
	if (state->comp.nr)
		io_submit_flush_completions(&state->comp, ctx);
	if (state->plug_started)
		blk_finish_plug(&state->plug);
	io_state_file_put(state);
}

/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  unsigned int max_ios)
{
	state->plug_started = false;
	state->ios_left = max_ios;
	/* set only head, no need to init link_last in advance */
	state->link.head = NULL;
}

static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}

/*
 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
{
	u32 *sq_array = ctx->sq_array;
	unsigned head;

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head updates.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */
	head = READ_ONCE(sq_array[ctx->cached_sq_head++ & ctx->sq_mask]);
	if (likely(head < ctx->sq_entries))
		return &ctx->sq_sqes[head];

	/* drop invalid entries */
	ctx->cached_sq_dropped++;
	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
	return NULL;
}

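/*
 * Pull up to @nr SQEs off the SQ ring and submit them. Returns the number
 * of SQEs consumed, or a negative error if none could be submitted.
 */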
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
{
	int submitted = 0;

	/* if we have a backlog and couldn't flush it all, return BUSY */
	if (test_bit(0, &ctx->sq_check_overflow)) {
		if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
			return -EBUSY;
	}

	/* make sure SQ entry isn't read before tail */
	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));

	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;

	percpu_counter_add(&current->io_uring->inflight, nr);
	refcount_add(nr, &current->usage);
	io_submit_state_start(&ctx->submit_state, nr);

	while (submitted < nr) {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;

		req = io_alloc_req(ctx);
		if (unlikely(!req)) {
			if (!submitted)
				submitted = -EAGAIN;
			break;
		}
		sqe = io_get_sqe(ctx);
		if (unlikely(!sqe)) {
			kmem_cache_free(req_cachep, req);
			break;
		}
		/* will complete beyond this point, count as submitted */
		submitted++;
		if (io_submit_sqe(ctx, req, sqe))
			break;
	}

	if (unlikely(submitted != nr)) {
		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
		struct io_uring_task *tctx = current->io_uring;
		int unused = nr - ref_used;

		percpu_ref_put_many(&ctx->refs, unused);
		percpu_counter_sub(&tctx->inflight, unused);
		put_task_struct_many(current, unused);
	}

	io_submit_state_end(&ctx->submit_state, ctx);
	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);

	return submitted;
}

static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
{
	/* Tell userspace we may need a wakeup call */
	spin_lock_irq(&ctx->completion_lock);
	ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
	spin_unlock_irq(&ctx->completion_lock);
}

static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
	spin_unlock_irq(&ctx->completion_lock);
}

static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > 8)
		to_submit = 8;

	if (!list_empty(&ctx->iopoll_list) || to_submit) {
		unsigned nr_events = 0;

		mutex_lock(&ctx->uring_lock);
		if (!list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, &nr_events, 0);

		if (to_submit && !ctx->sqo_dead &&
		    likely(!percpu_ref_is_dying(&ctx->refs)))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);
	}

	if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
		wake_up(&ctx->sqo_sq_wait);

	return ret;
}

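/* Use the largest idle timeout requested by any ctx sharing this sq thread. */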
static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
		if (sq_thread_idle < ctx->sq_thread_idle)
			sq_thread_idle = ctx->sq_thread_idle;
	}

	sqd->sq_thread_idle = sq_thread_idle;
}

static void io_sqd_init_new(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;

	while (!list_empty(&sqd->ctx_new_list)) {
		ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
		list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
		complete(&ctx->sq_thread_comp);
	}

	io_sqd_update_thread_idle(sqd);
}

static int io_sq_thread(void *data)
{
	struct cgroup_subsys_state *cur_css = NULL;
	struct files_struct *old_files = current->files;
	struct nsproxy *old_nsproxy = current->nsproxy;
	const struct cred *old_cred = NULL;
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	DEFINE_WAIT(wait);

	task_lock(current);
	current->files = NULL;
	current->nsproxy = NULL;
	task_unlock(current);

	while (!kthread_should_stop()) {
		int ret;
		bool cap_entries, sqt_spin, needs_sched;

		/*
		 * Any changes to the sqd lists are synchronized through the
		 * kthread parking. This synchronizes the thread vs users,
		 * the users are synchronized on the sqd->ctx_lock.
		 */
		if (kthread_should_park()) {
			kthread_parkme();
			/*
			 * If the previous park came from io_put_sq_data(),
			 * the sq thread is about to be stopped, so check for
			 * that once we are unparked.
			 */
			if (kthread_should_stop())
				break;
		}

		if (unlikely(!list_empty(&sqd->ctx_new_list))) {
			io_sqd_init_new(sqd);
			timeout = jiffies + sqd->sq_thread_idle;
		}

		sqt_spin = false;
		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			if (current->cred != ctx->creds) {
				if (old_cred)
					revert_creds(old_cred);
				old_cred = override_creds(ctx->creds);
			}
			io_sq_thread_associate_blkcg(ctx, &cur_css);
#ifdef CONFIG_AUDIT
			current->loginuid = ctx->loginuid;
			current->sessionid = ctx->sessionid;
#endif

			ret = __io_sq_thread(ctx, cap_entries);
			if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list)))
				sqt_spin = true;

			io_sq_thread_drop_mm_files();
		}

		if (sqt_spin || !time_after(jiffies, timeout)) {
			io_run_task_work();
			io_sq_thread_drop_mm_files();
			cond_resched();
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			continue;
		}

		needs_sched = true;
		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			if ((ctx->flags & IORING_SETUP_IOPOLL) &&
			    !list_empty_careful(&ctx->iopoll_list)) {
				needs_sched = false;
				break;
			}
			if (io_sqring_entries(ctx)) {
				needs_sched = false;
				break;
			}
		}

		if (needs_sched && !kthread_should_park()) {
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				io_ring_set_wakeup_flag(ctx);

			schedule();
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				io_ring_clear_wakeup_flag(ctx);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_run_task_work();
	io_sq_thread_drop_mm_files();

	if (cur_css)
		io_sq_thread_unassociate_blkcg();
	if (old_cred)
		revert_creds(old_cred);

	task_lock(current);
	current->files = old_files;
	current->nsproxy = old_nsproxy;
	task_unlock(current);

	kthread_parkme();
	return 0;
}

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned to_wait;
	unsigned nr_timeouts;
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return io_cqring_events(ctx) >= iowq->to_wait ||
			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			    int wake_flags, void *key)
{
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
							wq);

	/*
	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
	 * the task, and the next invocation will do it.
	 */
	if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
		return autoremove_wake_function(curr, mode, wake_flags, key);
	return -1;
}

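/*
 * Run pending task_work. Returns 1 if any work was run, 0 if there was
 * nothing to do, or a negative error if a signal needs handling.
 */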
static int io_run_task_work_sig(void)
{
	if (io_run_task_work())
		return 1;
	if (!signal_pending(current))
		return 0;
	if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
		return -ERESTARTSYS;
	return -EINTR;
}

/* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  signed long *timeout)
{
	int ret;

	/* make sure we run task_work before checking for signals */
	ret = io_run_task_work_sig();
	if (ret || io_should_wake(iowq))
		return ret;
	/* let the caller flush overflows, retry */
	if (test_bit(0, &ctx->cq_check_overflow))
		return 1;

	*timeout = schedule_timeout(*timeout);
	return !*timeout ? -ETIME : 1;
}

/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
			  const sigset_t __user *sig, size_t sigsz,
			  struct __kernel_timespec __user *uts)
{
	struct io_wait_queue iowq = {
		.wq = {
			.private	= current,
			.func		= io_wake_function,
			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
		},
		.ctx		= ctx,
		.to_wait	= min_events,
	};
	struct io_rings *rings = ctx->rings;
	signed long timeout = MAX_SCHEDULE_TIMEOUT;
	int ret;

	do {
		io_cqring_overflow_flush(ctx, false, NULL, NULL);
		if (io_cqring_events(ctx) >= min_events)
			return 0;
		if (!io_run_task_work())
			break;
	} while (1);

	if (sig) {
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
						      sigsz);
		else
#endif
			ret = set_user_sigmask(sig, sigsz);

		if (ret)
			return ret;
	}

	if (uts) {
		struct timespec64 ts;

		if (get_timespec64(&ts, uts))
			return -EFAULT;
		timeout = timespec64_to_jiffies(&ts);
	}

	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		io_cqring_overflow_flush(ctx, false, NULL, NULL);
		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
		finish_wait(&ctx->wait, &iowq.wq);
	} while (ret > 0);

	restore_saved_sigmask_unless(ret == -EINTR);

	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}

static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#else
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file;

		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
#endif
}

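/* Ref callback: the last reference is gone, wake up io_rsrc_ref_quiesce(). */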
static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
{
	struct fixed_rsrc_data *data;

	data = container_of(ref, struct fixed_rsrc_data, refs);
	complete(&data->done);
}

static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
{
	spin_lock_bh(&ctx->rsrc_ref_lock);
}

static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
{
	spin_unlock_bh(&ctx->rsrc_ref_lock);
}

static void io_sqe_rsrc_set_node(struct io_ring_ctx *ctx,
				 struct fixed_rsrc_data *rsrc_data,
				 struct fixed_rsrc_ref_node *ref_node)
{
	io_rsrc_ref_lock(ctx);
	rsrc_data->node = ref_node;
	list_add_tail(&ref_node->node, &ctx->rsrc_ref_list);
	io_rsrc_ref_unlock(ctx);
	percpu_ref_get(&rsrc_data->refs);
}

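/*
 * Quiesce a fixed resource set: kill the active ref node and the data refs,
 * then wait for all pending rsrc put work to complete. If a signal interrupts
 * the wait, the refs are resurrected and the backup node re-installed so the
 * registration remains usable, and the error is returned to the caller.
 */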
static int io_rsrc_ref_quiesce(struct fixed_rsrc_data *data,
			       struct io_ring_ctx *ctx,
			       struct fixed_rsrc_ref_node *backup_node)
{
	struct fixed_rsrc_ref_node *ref_node;
	int ret;

	io_rsrc_ref_lock(ctx);
	ref_node = data->node;
	io_rsrc_ref_unlock(ctx);
	if (ref_node)
		percpu_ref_kill(&ref_node->refs);

	percpu_ref_kill(&data->refs);

	/* wait for all refs nodes to complete */
	flush_delayed_work(&ctx->rsrc_put_work);
	do {
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret)
			break;
		ret = io_run_task_work_sig();
		if (ret < 0) {
			percpu_ref_resurrect(&data->refs);
			reinit_completion(&data->done);
			io_sqe_rsrc_set_node(ctx, data, backup_node);
			return ret;
		}
	} while (1);

	destroy_fixed_rsrc_ref_node(backup_node);
	return 0;
}

static struct fixed_rsrc_data *alloc_fixed_rsrc_data(struct io_ring_ctx *ctx)
{
	struct fixed_rsrc_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	if (percpu_ref_init(&data->refs, io_rsrc_data_ref_zero,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(data);
		return NULL;
	}
	data->ctx = ctx;
	init_completion(&data->done);
	return data;
}

static void free_fixed_rsrc_data(struct fixed_rsrc_data *data)
{
	percpu_ref_exit(&data->refs);
	kfree(data->table);
	kfree(data);
}

static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	struct fixed_rsrc_data *data = ctx->file_data;
	struct fixed_rsrc_ref_node *backup_node;
	unsigned nr_tables, i;
	int ret;

	if (!data)
		return -ENXIO;
	backup_node = alloc_fixed_rsrc_ref_node(ctx);
	if (!backup_node)
		return -ENOMEM;
	init_fixed_file_ref_node(ctx, backup_node);

	ret = io_rsrc_ref_quiesce(data, ctx, backup_node);
	if (ret)
		return ret;

	__io_sqe_files_unregister(ctx);
	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
	for (i = 0; i < nr_tables; i++)
		kfree(data->table[i].files);
	free_fixed_rsrc_data(data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
	return 0;
}

static void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		/*
		 * The park is a bit of a work-around, without it we get
		 * warning spews on shutdown with SQPOLL set and affinity
		 * set to a single CPU.
		 */
		if (sqd->thread) {
			kthread_park(sqd->thread);
			kthread_stop(sqd->thread);
		}

		kfree(sqd);
	}
}

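/*
 * With IORING_SETUP_ATTACH_WQ, reuse the sq_data (and thus the SQPOLL thread)
 * of an existing ring identified by p->wq_fd instead of creating a new one.
 */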
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (f.file->f_op != &io_uring_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
{
	struct io_sq_data *sqd;

	if (p->flags & IORING_SETUP_ATTACH_WQ)
		return io_attach_sq_data(p);

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	INIT_LIST_HEAD(&sqd->ctx_new_list);
	mutex_init(&sqd->ctx_lock);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	return sqd;
}

static void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	if (!sqd->thread)
		return;
	kthread_unpark(sqd->thread);
	mutex_unlock(&sqd->lock);
}

static void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	if (!sqd->thread)
		return;
	mutex_lock(&sqd->lock);
	kthread_park(sqd->thread);
}

static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		if (sqd->thread) {
			/*
			 * We may arrive here from the error branch in
			 * io_sq_offload_create() where the kthread is created
			 * without being woken up, so wake it up now to make
			 * sure the wait will complete.
			 */
			wake_up_process(sqd->thread);
			wait_for_completion(&ctx->sq_thread_comp);

			io_sq_thread_park(sqd);
		}

		mutex_lock(&sqd->ctx_lock);
		list_del(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		mutex_unlock(&sqd->ctx_lock);

		if (sqd->thread)
			io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

static void io_finish_async(struct io_ring_ctx *ctx)
{
	io_sq_thread_stop(ctx);

	if (ctx->io_wq) {
		io_wq_destroy(ctx->io_wq);
		ctx->io_wq = NULL;
	}
}

#if defined(CONFIG_UNIX)
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i, nr_files;

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;

	nr_files = 0;
	fpl->user = get_uid(ctx->user);
	for (i = 0; i < nr; i++) {
		struct file *file = io_file_from_index(ctx, i + offset);

		if (!file)
			continue;
		fpl->fp[nr_files] = get_file(file);
		unix_inflight(fpl->user, fpl->fp[nr_files]);
		nr_files++;
	}

	if (nr_files) {
		fpl->max = SCM_MAX_FD;
		fpl->count = nr_files;
		UNIXCB(skb).fp = fpl;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
		skb_queue_head(&sk->sk_receive_queue, skb);

		for (i = 0; i < nr_files; i++)
			fput(fpl->fp[i]);
	} else {
		kfree_skb(skb);
		kfree(fpl);
	}

	return 0;
}

/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		struct file *file = io_file_from_index(ctx, total);

		if (file)
			fput(file);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif

static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
				    unsigned nr_tables, unsigned nr_files)
{
	int i;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_rsrc_table *table = &file_data->table[i];
		unsigned this_files;

		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
		table->files = kcalloc(this_files, sizeof(struct file *),
					GFP_KERNEL);
		if (!table->files)
			break;
		nr_files -= this_files;
	}

	if (i == nr_tables)
		return 0;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_rsrc_table *table = &file_data->table[i];
		kfree(table->files);
	}
	return 1;
}

static void io_ring_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}

static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
{
	struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);
		ref_node->rsrc_put(ctx, prsrc);
		kfree(prsrc);
	}

	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
	percpu_ref_put(&rsrc_data->refs);
}

static void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct fixed_rsrc_ref_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}

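/*
 * Registered files live in a two-level table; the index is split into a
 * table number (upper bits) and a slot within that table (lower bits).
 */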
static struct file **io_fixed_file_slot(struct fixed_rsrc_data *file_data,
					unsigned i)
{
	struct fixed_rsrc_table *table;

	table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
	return &table->files[i & IORING_FILE_TABLE_MASK];
}

static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
	struct fixed_rsrc_ref_node *ref_node;
	struct fixed_rsrc_data *data;
	struct io_ring_ctx *ctx;
	bool first_add = false;
	int delay = HZ;

	ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
	data = ref_node->rsrc_data;
	ctx = data->ctx;

	io_rsrc_ref_lock(ctx);
	ref_node->done = true;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		ref_node = list_first_entry(&ctx->rsrc_ref_list,
					struct fixed_rsrc_ref_node, node);
		/* recycle ref nodes in order */
		if (!ref_node->done)
			break;
		list_del(&ref_node->node);
		first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
	}
	io_rsrc_ref_unlock(ctx);

	if (percpu_ref_is_dying(&data->refs))
		delay = 0;

	if (!delay)
		mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
	else if (first_add)
		queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}

static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx)
{
	struct fixed_rsrc_ref_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return NULL;
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}

static void init_fixed_file_ref_node(struct io_ring_ctx *ctx,
				     struct fixed_rsrc_ref_node *ref_node)
{
	ref_node->rsrc_data = ctx->file_data;
	ref_node->rsrc_put = io_ring_file_put;
}

static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}

static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables, i;
	struct file *file;
	int fd, ret = -ENOMEM;
	struct fixed_rsrc_ref_node *ref_node;
	struct fixed_rsrc_data *file_data;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	file_data = alloc_fixed_rsrc_data(ctx);
	if (!file_data)
		return -ENOMEM;
	ctx->file_data = file_data;

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
				   GFP_KERNEL);
	if (!file_data->table)
		goto out_free;

	if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
		goto out_free;

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto out_fput;
		}
		/* allow sparse sets */
		if (fd == -1)
			continue;

		file = fget(fd);
		ret = -EBADF;
		if (!file)
			goto out_fput;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			goto out_fput;
		}
		*io_fixed_file_slot(file_data, i) = file;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret) {
		io_sqe_files_unregister(ctx);
		return ret;
	}

	ref_node = alloc_fixed_rsrc_ref_node(ctx);
	if (!ref_node) {
		io_sqe_files_unregister(ctx);
		return -ENOMEM;
	}
	init_fixed_file_ref_node(ctx, ref_node);

	io_sqe_rsrc_set_node(ctx, file_data, ref_node);
	return ret;
out_fput:
	for (i = 0; i < ctx->nr_user_files; i++) {
		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
	for (i = 0; i < nr_tables; i++)
		kfree(file_data->table[i].files);
	ctx->nr_user_files = 0;
out_free:
	free_fixed_rsrc_data(ctx->file_data);
	ctx->file_data = NULL;
	return ret;
}

static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

static int io_queue_rsrc_removal(struct fixed_rsrc_data *data, void *rsrc)
{
	struct io_rsrc_put *prsrc;
	struct fixed_rsrc_ref_node *ref_node = data->node;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &ref_node->rsrc_list);

	return 0;
}

static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
					struct file *file)
{
	return io_queue_rsrc_removal(data, (void *)file);
}

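/*
 * Update registered files in place. Files being replaced are queued on the
 * current ref node and only put once that node's refs drop to zero; if any
 * slot changed, a fresh ref node is installed so the old one can drain.
 */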
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update *up,
				 unsigned nr_args)
{
	struct fixed_rsrc_data *data = ctx->file_data;
	struct fixed_rsrc_ref_node *ref_node;
	struct file *file, **file_slot;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;
	bool needs_switch = false;

	if (check_add_overflow(up->offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	ref_node = alloc_fixed_rsrc_ref_node(ctx);
	if (!ref_node)
		return -ENOMEM;
	init_fixed_file_ref_node(ctx, ref_node);

	fds = u64_to_user_ptr(up->data);
	for (done = 0; done < nr_args; done++) {
		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(ctx->file_data, i);

		if (*file_slot) {
			err = io_queue_file_removal(data, *file_slot);
			if (err)
				break;
			*file_slot = NULL;
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			*file_slot = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err) {
				*file_slot = NULL;
				fput(file);
				break;
			}
		}
	}

	if (needs_switch) {
		percpu_ref_kill(&data->node->refs);
		io_sqe_rsrc_set_node(ctx, data, ref_node);
	} else
		destroy_fixed_rsrc_ref_node(ref_node);

	return done ? done : err;
}

static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_rsrc_update up;

	if (!ctx->file_data)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (up.resv)
		return -EINVAL;

	return __io_sqe_files_update(ctx, &up, nr_args);
}

static struct io_wq_work *io_free_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	req = io_put_req_find_next(req);
	return req ? &req->work : NULL;
}

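/*
 * Set up io-wq for async offload: either create a workqueue sized from the
 * SQ ring, or, with IORING_SETUP_ATTACH_WQ, attach to the io-wq of an
 * existing ring referenced by p->wq_fd.
 */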
static int io_init_wq_offload(struct io_ring_ctx *ctx,
			      struct io_uring_params *p)
{
	struct io_wq_data data;
	struct fd f;
	struct io_ring_ctx *ctx_attach;
	unsigned int concurrency;
	int ret = 0;

	data.user = ctx->user;
	data.free_work = io_free_work;
	data.do_work = io_wq_submit_work;

	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
		/* Do QD, or 4 * CPUS, whatever is smallest */
		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

		ctx->io_wq = io_wq_create(concurrency, &data);
		if (IS_ERR(ctx->io_wq)) {
			ret = PTR_ERR(ctx->io_wq);
			ctx->io_wq = NULL;
		}
		return ret;
	}

	f = fdget(p->wq_fd);
	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &io_uring_fops) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx_attach = f.file->private_data;
	/* @io_wq is protected by holding the fd */
	if (!io_wq_get(ctx_attach->io_wq, &data)) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx->io_wq = ctx_attach->io_wq;
out_fput:
	fdput(f);
	return ret;
}

static int io_uring_alloc_task_context(struct task_struct *task)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	tctx->last = NULL;
	atomic_set(&tctx->in_idle, 0);
	tctx->sqpoll = false;
	io_init_identity(&tctx->__identity);
	tctx->identity = &tctx->__identity;
	task->io_uring = tctx;
	spin_lock_init(&tctx->task_lock);
	INIT_WQ_LIST(&tctx->task_list);
	tctx->task_state = 0;
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
	if (tctx->identity != &tctx->__identity)
		kfree(tctx->identity);
	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

static int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct io_sq_data *sqd;

		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
			goto err;

		sqd = io_get_sq_data(p);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_data = sqd;
		io_sq_thread_park(sqd);
		mutex_lock(&sqd->ctx_lock);
		list_add(&ctx->sqd_list, &sqd->ctx_new_list);
		mutex_unlock(&sqd->ctx_lock);
		io_sq_thread_unpark(sqd);

		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		if (sqd->thread)
			goto done;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err;
			if (!cpu_online(cpu))
				goto err;

			sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
							cpu, "io_uring-sq");
		} else {
			sqd->thread = kthread_create(io_sq_thread, sqd,
							"io_uring-sq");
		}
		if (IS_ERR(sqd->thread)) {
			ret = PTR_ERR(sqd->thread);
			sqd->thread = NULL;
			goto err;
		}
		ret = io_uring_alloc_task_context(sqd->thread);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

done:
	ret = io_init_wq_offload(ctx, p);
	if (ret)
		goto err;

	return 0;
err:
	io_finish_async(ctx);
	return ret;
}

static void io_sq_offload_start(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
		wake_up_process(sqd->thread);
}

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static inline int __io_account_mem(struct user_struct *user,
				   unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->limit_mem)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->limit_mem) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY | __GFP_ACCOUNT;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}

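/*
 * The rings are a single allocation: the io_rings struct and its CQE array,
 * followed (after alignment) by the SQ index array whose offset is returned
 * in *sq_offset. The SQEs themselves live in a separate allocation.
 */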
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	if (sq_offset)
		*sq_offset = off;

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	return off;
}

static int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			unpin_user_page(imu->bvec[j].bv_page);

		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

/*
 * Not super efficient, but this is only done at registration time. And we do cache
 * the last compound head, so generally we'll only do a full search if we don't
 * match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

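/*
 * Pin the pages backing one iovec and record them as a bio_vec array in
 * @imu. File-backed mappings other than hugetlb are rejected, and the
 * pinned pages are charged to the memlock/pinned_vm accounting.
 */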
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf *imu,
				  struct page **last_hpage)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	unsigned long off, start, end, ubuf;
	size_t size;
	int ret, pret, nr_pages, i;

	ubuf = (unsigned long) iov->iov_base;
	end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	ret = -ENOMEM;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);
	if (!imu->bvec)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			struct vm_area_struct *vma = vmas[i];

			if (vma->vm_file &&
			    !is_file_hugepages(vma->vm_file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		kvfree(imu->bvec);
		goto done;
	}

	ret = io_buffer_account_pin(ctx, pages, pret, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, pret);
		kvfree(imu->bvec);
		goto done;
	}

	off = ubuf & ~PAGE_MASK;
	size = iov->iov_len;
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		imu->bvec[i].bv_page = pages[i];
		imu->bvec[i].bv_len = vec_len;
		imu->bvec[i].bv_offset = off;
		off = 0;
		size -= vec_len;
	}
	/* store original address for later verification */
	imu->ubuf = ubuf;
	imu->len = iov->iov_len;
	imu->nr_bvecs = nr_pages;
	ret = 0;
done:
	kvfree(pages);
	kvfree(vmas);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base || !iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	return 0;
}

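/*
 * Register an array of user buffers: each iovec is validated, its pages
 * pinned and accounted, and the mapping stored in ctx->user_bufs for use by
 * fixed read/write requests.
 */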
static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned int nr_args)
{
	int i, ret;
	struct iovec iov;
	struct page *last_hpage = NULL;

	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret)
		return ret;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			break;

		ret = io_buffer_validate(&iov);
		if (ret)
			break;

		ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);
		if (ret)
			break;

		ctx->nr_user_bufs++;
	}

	if (ret)
		io_sqe_buffers_unregister(ctx);

	return ret;
}

static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}

static int __io_destroy_buffers(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;
	struct io_buffer *buf = p;

	__io_remove_buffers(ctx, buf, id, -1U);
	return 0;
}

static void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
	idr_destroy(&ctx->io_buffer_idr);
}

static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
{
	struct io_kiocb *req, *nxt;

	list_for_each_entry_safe(req, nxt, list, compl.list) {
		if (tsk && req->task != tsk)
			continue;
		list_del(&req->compl.list);
		kmem_cache_free(req_cachep, req);
	}
}

static void io_req_caches_free(struct io_ring_ctx *ctx, struct task_struct *tsk)
{
	struct io_submit_state *submit_state = &ctx->submit_state;

	mutex_lock(&ctx->uring_lock);

	if (submit_state->free_reqs)
		kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
				     submit_state->reqs);

	io_req_cache_free(&submit_state->comp.free_list, NULL);

	spin_lock_irq(&ctx->completion_lock);
	io_req_cache_free(&submit_state->comp.locked_free_list, NULL);
	spin_unlock_irq(&ctx->completion_lock);

	mutex_unlock(&ctx->uring_lock);
}

static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	/*
	 * Some may use context even when all refs and requests have been put,
	 * and they are free to do so while still holding uring_lock, see
	 * __io_req_task_submit(). Wait for them to finish.
	 */
	mutex_lock(&ctx->uring_lock);
	mutex_unlock(&ctx->uring_lock);

	io_finish_async(ctx);
	io_sqe_buffers_unregister(ctx);

	if (ctx->sqo_task) {
		put_task_struct(ctx->sqo_task);
		ctx->sqo_task = NULL;
		mmdrop(ctx->mm_account);
		ctx->mm_account = NULL;
	}

#ifdef CONFIG_BLK_CGROUP
	if (ctx->sqo_blkcg_css)
		css_put(ctx->sqo_blkcg_css);
#endif

	io_sqe_files_unregister(ctx);
	io_eventfd_unregister(ctx);
	io_destroy_buffers(ctx);
	idr_destroy(&ctx->personality_idr);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
	put_cred(ctx->creds);
	io_req_caches_free(ctx, NULL);
	kfree(ctx->cancel_hash);
	kfree(ctx);
}

static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (!io_sqring_full(ctx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	/*
	 * Don't flush cqring overflow list here, just do a simple check.
	 * Otherwise there could possibly be an ABBA deadlock:
	 *      CPU0                    CPU1
	 *      ----                    ----
	 * lock(&ctx->uring_lock);
	 *                              lock(&ep->mtx);
	 *                              lock(&ctx->uring_lock);
	 * lock(&ep->mtx);
	 *
	 * Users may get EPOLLIN while seeing nothing in the cqring, which
	 * pushes them to do the flush.
	 */
	if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	struct io_identity *iod;

	iod = idr_remove(&ctx->personality_idr, id);
	if (iod) {
		put_cred(iod->creds);
		if (refcount_dec_and_test(&iod->count))
			kfree(iod);
		return 0;
	}

	return -EINVAL;
}

static int io_remove_personalities(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;

	io_unregister_personality(ctx, id);
	return 0;
}

static void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
					       exit_work);

	/*
	 * If we're doing polled IO and end up having requests being
	 * submitted async (out-of-line), then completions can come in while
	 * we're waiting for refs to drop. We need to reap these manually,
	 * as nobody else will be looking for them.
	 */
	do {
		io_uring_try_cancel_requests(ctx, NULL, NULL);
	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
	io_ring_ctx_free(ctx);
}

static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->ctx == data;
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);

	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
		ctx->sqo_dead = 1;

	/* if force is set, the ring is going away. always drop after that */
	ctx->cq_overflow_flushed = 1;
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true, NULL, NULL);
	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
	mutex_unlock(&ctx->uring_lock);

	io_kill_timeouts(ctx, NULL, NULL);
	io_poll_remove_all(ctx, NULL, NULL);

	if (ctx->io_wq)
		io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);

	/* if we failed setting up the ctx, we might not have any rings */
	io_iopoll_try_reap_events(ctx);

	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
	/*
	 * Use system_unbound_wq to avoid spawning tons of event kworkers
	 * if we're exiting a ton of rings at the same time. It just adds
	 * noise and overhead, there's no discernable change in runtime
	 * over using system_wq.
	 */
	queue_work(system_unbound_wq, &ctx->exit_work);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}

struct io_task_cancel {
	struct task_struct *task;
	struct files_struct *files;
};

static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_task_cancel *cancel = data;
	bool ret;

	if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
		unsigned long flags;
		struct io_ring_ctx *ctx = req->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irqsave(&ctx->completion_lock, flags);
		ret = io_match_task(req, cancel->task, cancel->files);
		spin_unlock_irqrestore(&ctx->completion_lock, flags);
	} else {
		ret = io_match_task(req, cancel->task, cancel->files);
	}
	return ret;
}

static void io_cancel_defer_files(struct io_ring_ctx *ctx,
				  struct task_struct *task,
				  struct files_struct *files)
{
	struct io_defer_entry *de = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
		if (io_match_task(de->req, task, files)) {
			list_cut_position(&list, &ctx->defer_list, &de->list);
			break;
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct io_defer_entry, list);
		list_del_init(&de->list);
		req_set_fail_links(de->req);
		io_put_req(de->req);
		io_req_complete(de->req, -ECANCELED);
		kfree(de);
	}
}

static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files)
{
	struct io_task_cancel cancel = { .task = task, .files = files, };

	while (1) {
		enum io_wq_cancel cret;
		bool ret = false;

		if (ctx->io_wq) {
			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
					       &cancel, true);
			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
		}

		/* SQPOLL thread does its own polling */
		if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
			while (!list_empty_careful(&ctx->iopoll_list)) {
				io_iopoll_try_reap_events(ctx);
				ret = true;
			}
		}

		ret |= io_poll_remove_all(ctx, task, files);
		ret |= io_kill_timeouts(ctx, task, files);
		ret |= io_run_task_work();
		io_cqring_overflow_flush(ctx, true, task, files);
		if (!ret)
			break;
		cond_resched();
	}
}

static int io_uring_count_inflight(struct io_ring_ctx *ctx,
				   struct task_struct *task,
				   struct files_struct *files)
{
	struct io_kiocb *req;
	int cnt = 0;

	spin_lock_irq(&ctx->inflight_lock);
	list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
		cnt += io_match_task(req, task, files);
	spin_unlock_irq(&ctx->inflight_lock);
	return cnt;
}

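/*
 * Wait for all inflight requests matching @task/@files to finish, retrying
 * cancellation on every pass and only sleeping if the inflight count did not
 * change since the last check.
 */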
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct task_struct *task,
				  struct files_struct *files)
{
	while (!list_empty_careful(&ctx->inflight_list)) {
		DEFINE_WAIT(wait);
		int inflight;

		inflight = io_uring_count_inflight(ctx, task, files);
		if (!inflight)
			break;

		io_uring_try_cancel_requests(ctx, task, files);

		if (ctx->sq_data)
			io_sq_thread_unpark(ctx->sq_data);
		prepare_to_wait(&task->io_uring->wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (inflight == io_uring_count_inflight(ctx, task, files))
			schedule();
		finish_wait(&task->io_uring->wait, &wait);
		if (ctx->sq_data)
			io_sq_thread_park(ctx->sq_data);
	}
}

static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	ctx->sqo_dead = 1;
	mutex_unlock(&ctx->uring_lock);

	/* make sure callers enter the ring to get error */
	if (ctx->rings)
		io_ring_set_wakeup_flag(ctx);
}

/*
 * We need to iteratively cancel requests, in case a request has dependent
 * hard links. These persist even for failure of cancelations, hence keep
 * looping until none are found.
 */
static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
					  struct files_struct *files)
{
	struct task_struct *task = current;

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		io_disable_sqo_submit(ctx);
		task = ctx->sq_data->thread;
		atomic_inc(&task->io_uring->in_idle);
		io_sq_thread_park(ctx->sq_data);
	}

	io_cancel_defer_files(ctx, task, files);

	io_uring_cancel_files(ctx, task, files);
	if (!files)
		io_uring_try_cancel_requests(ctx, task, NULL);

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		atomic_dec(&task->io_uring->in_idle);
		/*
		 * If the files that are going away are the ones in the thread
		 * identity, clear them out.
		 */
		if (task->io_uring->identity->files == files)
			task->io_uring->identity->files = NULL;
		io_sq_thread_unpark(ctx->sq_data);
	}
}

/*
 * Note that this task has used io_uring. We use it for cancelation purposes.
 */
static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
{
	struct io_uring_task *tctx = current->io_uring;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current);
		if (unlikely(ret))
			return ret;
		tctx = current->io_uring;
	}
	if (tctx->last != file) {
		void *old = xa_load(&tctx->xa, (unsigned long)file);

		if (!old) {
			get_file(file);
			ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
						file, GFP_KERNEL));
			if (ret) {
				fput(file);
				return ret;
			}

			/* one and only SQPOLL file note, held by sqo_task */
			WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) &&
				     current != ctx->sqo_task);
		}
		tctx->last = file;
	}

	/*
	 * This is race safe in that the task itself is doing this, hence it
	 * cannot be going through the exit/cancel paths at the same time.
	 * This cannot be modified while exit/cancel is running.
	 */
	if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
		tctx->sqpoll = true;

	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
static void io_uring_del_task_file(struct file *file)
{
	struct io_uring_task *tctx = current->io_uring;

	if (tctx->last == file)
		tctx->last = NULL;
	file = xa_erase(&tctx->xa, (unsigned long)file);
	if (file)
		fput(file);
}

static void io_uring_remove_task_files(struct io_uring_task *tctx)
{
	struct file *file;
	unsigned long index;

	xa_for_each(&tctx->xa, index, file)
		io_uring_del_task_file(file);
}

void __io_uring_files_cancel(struct files_struct *files)
{
	struct io_uring_task *tctx = current->io_uring;
	struct file *file;
	unsigned long index;

	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);
	xa_for_each(&tctx->xa, index, file)
		io_uring_cancel_task_requests(file->private_data, files);
	atomic_dec(&tctx->in_idle);

	if (files)
		io_uring_remove_task_files(tctx);
}

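/* number of requests this io_uring task currently has in flight */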
static s64 tctx_inflight(struct io_uring_task *tctx)
{
	return percpu_counter_sum(&tctx->inflight);
}

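/*
 * Cancel requests issued via an SQPOLL ring. Inflight accounting for those
 * requests lives in the SQPOLL thread's io_uring context, so idle and wait
 * on that context rather than on the current task's.
 */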
static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	s64 inflight;
	DEFINE_WAIT(wait);

	if (!ctx->sq_data)
		return;
	tctx = ctx->sq_data->thread->io_uring;
	io_disable_sqo_submit(ctx);

	atomic_inc(&tctx->in_idle);
	do {
		/* read completions before cancelations */
		inflight = tctx_inflight(tctx);
		if (!inflight)
			break;
		io_uring_cancel_task_requests(ctx, NULL);

		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);
}

/*
 * Find any io_uring fd that this task has registered or done IO on, and cancel
 * requests.
 */
void __io_uring_task_cancel(void)
{
	struct io_uring_task *tctx = current->io_uring;
	DEFINE_WAIT(wait);
	s64 inflight;

	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);

	/* trigger io_disable_sqo_submit() */
	if (tctx->sqpoll) {
		struct file *file;
		unsigned long index;

		xa_for_each(&tctx->xa, index, file)
			io_uring_cancel_sqpoll(file->private_data);
	}

	do {
		/* read completions before cancelations */
		inflight = tctx_inflight(tctx);
		if (!inflight)
			break;
		__io_uring_files_cancel(NULL);

		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);

		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);

	atomic_dec(&tctx->in_idle);

	io_uring_remove_task_files(tctx);
}

static int io_uring_flush(struct file *file, void *data)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_ring_ctx *ctx = file->private_data;

	if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
		io_uring_cancel_task_requests(ctx, NULL);
		io_req_caches_free(ctx, current);
	}

	if (!tctx)
		return 0;

	/* we should have cancelled and erased it before PF_EXITING */
	WARN_ON_ONCE((current->flags & PF_EXITING) &&
		     xa_load(&tctx->xa, (unsigned long)file));

	/*
	 * fput() is pending, will be 2 if the only other ref is our potential
	 * task file note. If the task is exiting, drop regardless of count.
	 */
	if (atomic_long_read(&file->f_count) != 2)
		return 0;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* there is only one file note, which is owned by sqo_task */
		WARN_ON_ONCE(ctx->sqo_task != current &&
			     xa_load(&tctx->xa, (unsigned long)file));
		/* sqo_dead check is for when this happens after cancellation */
		WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
			     !xa_load(&tctx->xa, (unsigned long)file));

		io_disable_sqo_submit(ctx);
	}

	if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
		io_uring_del_task_file(file);
	return 0;
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}

#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */

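/*
 * Wait until the SQ ring has space again (used for IORING_ENTER_SQ_WAIT
 * with SQPOLL), bailing out on signals or a dead SQPOLL ring.
 */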
static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	int ret = 0;
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;

		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (unlikely(ctx->sqo_dead)) {
			ret = -EOWNERDEAD;
			goto out;
		}

		if (!io_sqring_full(ctx))
			break;

		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
out:
	return ret;
}

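/*
 * Decode the argp/argsz pair for io_uring_enter(): without
 * IORING_ENTER_EXT_ARG it is just a sigset_t pointer, otherwise it points
 * at a struct io_uring_getevents_arg carrying the sigmask and timeout.
 */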
static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
			  struct __kernel_timespec __user **ts,
			  const sigset_t __user **sig)
{
	struct io_uring_getevents_arg arg;

	/*
	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
	 * is just a pointer to the sigset_t.
	 */
	if (!(flags & IORING_ENTER_EXT_ARG)) {
		*sig = (const sigset_t __user *) argp;
		*ts = NULL;
		return 0;
	}

	/*
	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
	 * timespec and sigset_t pointers if good.
	 */
	if (*argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;
	*sig = u64_to_user_ptr(arg.sigmask);
	*argsz = arg.sigmask_sz;
	*ts = u64_to_user_ptr(arg.ts);
	return 0;
}

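/*
 * Entry point for submitting and/or waiting for completions. With SQPOLL
 * the kernel thread does the actual submission, so this mostly wakes it up
 * and/or waits for SQ space; otherwise SQEs are submitted inline under
 * uring_lock.
 */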
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const void __user *, argp,
		size_t, argsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	io_run_task_work();

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
			IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	ret = -EBADFD;
	if (ctx->flags & IORING_SETUP_R_DISABLED)
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		io_cqring_overflow_flush(ctx, false, NULL, NULL);

		ret = -EOWNERDEAD;
		if (unlikely(ctx->sqo_dead))
			goto out;
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT) {
			ret = io_sqpoll_wait_sq(ctx);
			if (ret)
				goto out;
		}
		submitted = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_task_file(ctx, f.file);
		if (unlikely(ret))
			goto out;
		mutex_lock(&ctx->uring_lock);
		submitted = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		const sigset_t __user *sig;
		struct __kernel_timespec __user *ts;

		ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
		if (unlikely(ret))
			goto out;

		min_complete = min(min_complete, ctx->cq_entries);

		/*
		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
		 * space applications don't need to do io completion events
		 * polling again, they can rely on io_sq_thread to do polling
		 * work, which can reduce cpu usage and uring_lock contention.
		 */
		if (ctx->flags & IORING_SETUP_IOPOLL &&
		    !(ctx->flags & IORING_SETUP_SQPOLL)) {
			ret = io_iopoll_check(ctx, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, argsz, ts);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}

#ifdef CONFIG_PROC_FS
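/* fdinfo helper: dump one registered personality (its credentials) */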
static int io_uring_show_cred(int id, void *p, void *data)
{
	struct io_identity *iod = p;
	const struct cred *cred = iod->creds;
	struct seq_file *m = data;
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
					from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}

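/*
 * Dump ring state for /proc/<pid>/fdinfo: SQPOLL thread, registered files
 * and buffers, personalities and the cancel hash poll list.
 */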
static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	struct io_sq_data *sq = NULL;
	bool has_lock;
	int i;

	/*
	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
	 * since fdinfo case grabs it in the opposite direction of normal use
	 * cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
		sq = ctx->sq_data;

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = *io_fixed_file_slot(ctx->file_data, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
						(unsigned int) buf->len);
	}
	if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
		seq_printf(m, "Personalities:\n");
		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
	}
	seq_printf(m, "PollList:\n");
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list = &ctx->cancel_hash[i];
		struct io_kiocb *req;

		hlist_for_each_entry(req, list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
					req->task->task_works != NULL);
	}
	spin_unlock_irq(&ctx->completion_lock);
	if (has_lock)
		mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif

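/* file operations backing the anonymous io_uring fd */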
static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

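/*
 * Allocate the shared SQ/CQ rings and the SQE array, and initialize the
 * ring masks and sizes that are shared with userspace.
 */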
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}

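/*
 * Grab an unused fd, add the ring file to the task's io_uring xarray and
 * install the fd in the file table.
 */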
static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
{
	int ret, fd;

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;

	ret = io_uring_add_task_file(ctx, file);
	if (ret) {
		put_unused_fd(fd);
		return ret;
	}
	fd_install(fd, file);
	return fd;
}

/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
	struct file *file;
#if defined(CONFIG_UNIX)
	int ret;

	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ERR_PTR(ret);
#endif

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
#if defined(CONFIG_UNIX)
	if (IS_ERR(file)) {
		sock_release(ctx->ring_sock);
		ctx->ring_sock = NULL;
	} else {
		ctx->ring_sock->file = file;
	}
#endif
	return file;
}

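/*
 * Core of io_uring_setup(): size and allocate the rings, set up SQPOLL
 * offload if requested, fill in the offsets and features reported back to
 * userspace, and install the ring fd.
 */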
static int io_uring_create(unsigned entries, struct io_uring_params *p,
			   struct io_uring_params __user *params)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	user = get_uid(current_user());

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->limit_mem = !capable(CAP_IPC_LOCK);
	ctx->user = user;
	ctx->creds = get_current_cred();
#ifdef CONFIG_AUDIT
	ctx->loginuid = current->loginuid;
	ctx->sessionid = current->sessionid;
#endif
	ctx->sqo_task = get_task_struct(current);

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

#ifdef CONFIG_BLK_CGROUP
	/*
	 * The sq thread will belong to the original cgroup it was inited in.
	 * If the cgroup goes offline (e.g. disabling the io controller), then
	 * issued bios will be associated with the closest cgroup later in the
	 * block layer.
	 */
	rcu_read_lock();
	ctx->sqo_blkcg_css = blkcg_css();
	ret = css_tryget_online(ctx->sqo_blkcg_css);
	rcu_read_unlock();
	if (!ret) {
		/* don't init against a dying cgroup, have the user try again */
		ctx->sqo_blkcg_css = NULL;
		ret = -ENODEV;
		goto err;
	}
#endif
	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;

	if (!(p->flags & IORING_SETUP_R_DISABLED))
		io_sq_offload_start(ctx);

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
		io_disable_sqo_submit(ctx);
		/* fput will clean it up */
		fput(file);
		return ret;
	}

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_disable_sqo_submit(ctx);
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}

/*
 * Sets up an aio uring context, and returns the fd. Applications asks for a
 * ring size, we return the actual sq/cq ring sizes (among other things) in the
 * params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED))
		return -EINVAL;

	return  io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

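/*
 * IORING_REGISTER_PROBE: report back to userspace which opcodes this
 * kernel supports, in a struct io_uring_probe.
 */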
static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}

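/*
 * IORING_REGISTER_PERSONALITY: snapshot the current credentials into the
 * personality idr and return the allocated id.
 */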
static int io_register_personality(struct io_ring_ctx *ctx)
{
	struct io_identity *id;
	int ret;

	id = kmalloc(sizeof(*id), GFP_KERNEL);
	if (unlikely(!id))
		return -ENOMEM;

	io_init_identity(id);
	id->creds = get_current_cred();

	ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
	if (ret < 0) {
		put_cred(id->creds);
		kfree(id);
	}
	return ret;
}

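/*
 * IORING_REGISTER_RESTRICTIONS: record which register opcodes, sqe opcodes
 * and sqe flags a ring started with IORING_SETUP_R_DISABLED may use.
 */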
static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
				    unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}

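/* IORING_REGISTER_ENABLE_RINGS: start a ring created with IORING_SETUP_R_DISABLED */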
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;

	io_sq_offload_start(ctx);

	return 0;
}

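/*
 * Which register opcodes need the ctx quiesced (percpu refs killed and
 * drained) before they can run.
 */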
static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}

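/*
 * Handle io_uring_register(2) opcodes with uring_lock held, quiescing the
 * ctx references first when the opcode requires it.
 */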
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex, if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		do {
			ret = wait_for_completion_interruptible(&ctx->ref_comp);
			if (!ret)
				break;
			ret = io_run_task_work_sig();
			if (ret < 0)
				break;
		} while (1);

		mutex_lock(&ctx->uring_lock);

		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			goto out_quiesce;
		}
	}

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST) {
			ret = -EINVAL;
			goto out;
		}

		if (!test_bit(opcode, ctx->restrictions.register_op)) {
			ret = -EACCES;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffers_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out_quiesce:
		reinit_completion(&ctx->ref_comp);
	}
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}

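/* boot-time init: sanity-check the SQE layout and create the request slab */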
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
};
__initcall(io_uring_init);