/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#include "internal.h"

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
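
/*
 * Illustrative sketch (not part of the original source): because this header
 * and the events are mapped into userspace, a user-level reaper could in
 * principle consume events without a syscall, roughly:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id; // from io_setup()
 *	unsigned head = ring->head;
 *	while (head != ring->tail) {
 *		struct io_event ev = ring->io_events[head]; // consume ev ...
 *		head = (head + 1) % ring->nr;
 *	}
 *	ring->head = head; // "written to by userland", see above
 *
 * Real code would also need the appropriate memory barriers; this only
 * illustrates the layout.
 */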

#define AIO_RING_PAGES	8

struct kioctx_table {
	struct rcu_head	rcu;
	unsigned	nr;
	struct kioctx	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct work_struct	free_work;

	/*
	 * signals when all in-flight requests are done
	 */
	struct completion *requests_done;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct qstr this = QSTR_INIT("[aio]", 5);
	struct file *file;
	struct path path;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_mapping->backing_dev_info = &noop_backing_dev_info;
	inode->i_size = PAGE_SIZE * nr_pages;

	path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(aio_mnt);

	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDWR;
	return file;
}

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname	= simple_dname,
	};
	return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);

		/* Prevent further access to the kioctx from migratepages */
		spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock);
		aio_ring_file->f_inode->i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
				page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = table->table[i];
		if (ctx && ctx->aio_ring_file == file) {
			ctx->user_id = ctx->mmap_base = vma->vm_start;
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
	.mremap = aio_ring_remap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown.  */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	get_page(new);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		put_page(new);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
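
	/*
	 * Worked example (illustrative, assuming 4 KiB pages and 32-byte
	 * struct aio_ring/struct io_event): a request for 128 events becomes
	 * 130 after the "+2" above, so size = 32 + 130 * 32 = 4192 bytes and
	 * nr_pages = 2; the usable nr_events recomputed below is then
	 * (2 * 4096 - 32) / 32 = 255.
	 */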
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
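
/*
 * Illustrative example (assuming 4 KiB pages and the 32-byte structs above):
 * AIO_EVENTS_PER_PAGE = 128 and AIO_EVENTS_FIRST_PAGE = 127, giving
 * AIO_EVENTS_OFFSET = 1.  Event index 200 then maps to pos = 200 + 1 = 201,
 * i.e. ring page 201 / 128 = 1, slot 201 % 128 = 73; this is how
 * aio_complete() and aio_read_events_ring() locate events below.
 */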

void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
{
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
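
/*
 * Illustrative sketch (not from this file): a driver that can abort an
 * in-flight request would register a callback matching kiocb_cancel_fn,
 * roughly:
 *
 *	static int my_cancel(struct kiocb *iocb)
 *	{
 *		// abort the hardware operation; completion is then
 *		// reported through aio_complete() as usual
 *		return 0;
 *	}
 *	...
 *	kiocb_set_cancel_fn(iocb, my_cancel);
 */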

static int kiocb_cancel(struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb);
}

static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);

	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->requests_done)
		complete(ctx->requests_done);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changes ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;
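
	/*
	 * Illustrative numbers (not from the original source): with 8
	 * possible CPUs, io_setup(100, ...) gives max(100, 8 * 4) = 100,
	 * doubled to 200 slots, so even with up to half the slots parked
	 * in percpu caches userspace can still get the ~100 events it
	 * asked for.
	 */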

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		struct completion *requests_done)
{
	struct kioctx_table *table;

	if (atomic_xchg(&ctx->dead, 1))
		return -EINVAL;


	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != table->table[ctx->id]);
	table->table[ctx->id] = NULL;
	spin_unlock(&mm->ioctx_lock);

	/* percpu_ref_kill() will do the necessary call_rcu() */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 *  could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->requests_done = requests_done;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *req)
{
	while (!req->ki_ctx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (req->ki_ctx)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return req->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	int i;

	if (!table)
		return;

	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx = table->table[i];
		struct completion requests_done =
			COMPLETION_INITIALIZER_ONSTACK(requests_done);

		if (!ctx)
			continue;
		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &requests_done);

		/* Wait until all IO for the context are done. */
		wait_for_completion(&requests_done);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}
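
/*
 * Illustrative batching numbers (not from the original source): with
 * nr_events == 256 and 8 possible CPUs, req_batch is (256 - 1) / (8 * 4) = 7,
 * so get_reqs_available() pulls 7 slots from ctx->reqs_available at a time
 * and put_reqs_available() hands 7 back once the local pool exceeds
 * req_batch * 2 == 14.
 */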

/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring.  This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the we're-out-of-events case).  It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
                                  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}
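
/*
 * Illustrative wraparound example (not from the original source): with
 * nr_events == 128, head == 120 and tail == 10, the ring holds
 * 128 - (120 - 10) = 18 events; if completed_events == 25, then
 * 25 - 18 = 7 slots are known to have been consumed by userspace and are
 * handed back via put_reqs_available().
 */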

/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() encounters an
 *	out-of-space condition in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay: whether we read the old version or
		 * the new version, either will be valid.  The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock.  Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}

static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring  = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	ctx = table->table[id];
	if (ctx && ctx->user_id == ctx_id) {
		percpu_ref_get(&ctx->users);
		ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		iocb->ki_user_data = res;
		smp_wmb();
		iocb->ki_ctx = ERR_PTR(-EXDEV);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	percpu_ref_put(&ctx->reqs);
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	if (until.tv64 == 0)
		aio_read_events(ctx, min_nr, nr, event, &ret);
	else
		wait_event_interruptible_hrtimeout(ctx->wait,
				aio_read_events(ctx, min_nr, nr, event, &ret),
				until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting 
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail 
 *	with -EAGAIN if the specified nr_events exceeds the user's limit 
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(current->mm, ioctx, NULL);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}
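
/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * io_setup()/io_destroy() pair using the raw syscalls:
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 *	...
 *	syscall(__NR_io_destroy, ctx);
 */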

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding 
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		struct completion requests_done =
			COMPLETION_INITIALIZER_ONSTACK(requests_done);
		int ret;

		/* Pass requests_done to kill_ioctx() where it can be set
		 * in a thread-safe way. If we try to set it here then we have
		 * a race condition if two io_destroy() called simultaneously.
		 */
		ret = kill_ioctx(current->mm, ioctx, &requests_done);
		percpu_ref_put(&ioctx->users);

		/* Wait until all IO for the context are done. Otherwise kernel
		 * keep using user-space buffers even if user thinks the context
		 * is destroyed.
		 */
		if (!ret)
			wait_for_completion(&requests_done);

		return ret;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);
typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);

static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
				     int rw, char __user *buf,
				     unsigned long *nr_segs,
				     struct iovec **iovec,
				     bool compat)
{
	ssize_t ret;

	*nr_segs = kiocb->ki_nbytes;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(rw,
				(struct compat_iovec __user *)buf,
				*nr_segs, UIO_FASTIOV, *iovec, iovec);
	else
#endif
		ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)buf,
				*nr_segs, UIO_FASTIOV, *iovec, iovec);
	if (ret < 0)
		return ret;

	/* ki_nbytes now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	return 0;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
				       int rw, char __user *buf,
				       unsigned long *nr_segs,
				       struct iovec *iovec)
{
	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
		return -EFAULT;

	iovec->iov_base = buf;
	iovec->iov_len = kiocb->ki_nbytes;
	*nr_segs = 1;
	return 0;
}

/*
 * aio_run_iocb:
 *	Performs the initial checks and io submission.
 */
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
			    char __user *buf, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	unsigned long nr_segs;
	int rw;
	fmode_t mode;
	aio_rw_op *rw_op;
	rw_iter_op *iter_op;
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct iov_iter iter;

	switch (opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		rw_op	= file->f_op->aio_read;
		iter_op	= file->f_op->read_iter;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		rw_op	= file->f_op->aio_write;
		iter_op	= file->f_op->write_iter;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!rw_op && !iter_op)
			return -EINVAL;

		ret = (opcode == IOCB_CMD_PREADV ||
		       opcode == IOCB_CMD_PWRITEV)
			? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
						&iovec, compat)
			: aio_setup_single_vector(req, rw, buf, &nr_segs,
						  iovec);
		if (!ret)
			ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
		if (ret < 0) {
			if (iovec != inline_vecs)
				kfree(iovec);
			return ret;
		}

		req->ki_nbytes = ret;

		/* XXX: move/kill - rw_verify_area()? */
		/* This matches the pread()/pwrite() logic */
		if (req->ki_pos < 0) {
			ret = -EINVAL;
			break;
		}

		if (rw == WRITE)
			file_start_write(file);

		if (iter_op) {
			iov_iter_init(&iter, rw, iovec, nr_segs, req->ki_nbytes);
			ret = iter_op(req, &iter);
		} else {
			ret = rw_op(req, iovec, nr_segs, req->ki_pos);
		}

		if (rw == WRITE)
			file_end_write(file);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (iovec != inline_vecs)
		kfree(iovec);

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;
	req->ki_nbytes = iocb->aio_nbytes;

	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
			   (char __user *)(unsigned long)iocb->aio_buf,
			   compat);
	if (ret)
		goto out_put_req;

	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	percpu_ref_put(&ctx->reqs);
	kiocb_free(req);
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
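
/*
 * Illustrative userspace sketch (not part of this file): submitting a single
 * 4 KiB read on an open fd, assuming a ctx obtained from io_setup():
 *
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes = 4096;
 *	cb.aio_offset = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */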

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
1711 1712 1713 1714 1715 1716 1717 1718
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
J
L
1721 1722 1723 1724 1725
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
L
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
N
L
K
L
	return ret;
}