/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/anon_inodes.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#include "internal.h"

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8

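/*
 * Per-cpu cache of free event slots; slots migrate between these caches
 * and ctx->reqs_available in batches of ctx->req_batch (see
 * get_reqs_available() and put_reqs_available() below).
 */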
struct kioctx_cpu {
	unsigned		reqs_available;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	/* This needs improving */
	unsigned long		user_id;
	struct hlist_node	list;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_head		rcu_head;
	struct work_struct	free_work;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	int i;
	struct file *aio_ring_file = ctx->aio_ring_file;

	for (i = 0; i < ctx->nr_pages; i++) {
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
				page_count(ctx->ring_pages[i]));
		put_page(ctx->ring_pages[i]);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
		kfree(ctx->ring_pages);

	if (aio_ring_file) {
		truncate_setsize(aio_ring_file->f_inode, 0);
		pr_debug("pid(%d) i_nlink=%u d_count=%d d_unhashed=%d i_count=%d\n",
			current->pid, aio_ring_file->f_inode->i_nlink,
			aio_ring_file->f_path.dentry->d_count,
			d_unhashed(aio_ring_file->f_path.dentry),
			atomic_read(&aio_ring_file->f_inode->i_count));
		fput(aio_ring_file);
		ctx->aio_ring_file = NULL;
	}
}

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

static int aio_set_page_dirty(struct page *page)
{
	return 0;
}

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx = mapping->private_data;
	unsigned long flags;
	unsigned idx = old->index;
	int rc;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	put_page(old);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
	if (rc != MIGRATEPAGE_SUCCESS) {
		get_page(old);
		return rc;
	}

	get_page(new);

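	/*
	 * Hold completion_lock so that aio_complete() cannot write an event
	 * into the old page while it is being copied to its replacement.
	 */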
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = aio_set_page_dirty,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, populate;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

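	/*
	 * Back the ring with an anon inode file so its pages remain
	 * movable: the aio_ctx_aops migration callback above can relocate
	 * them under ctx->completion_lock.
	 */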
	file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -EAGAIN;
	}

	file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
	file->f_inode->i_mapping->private_data = ctx;
	file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_inode->i_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		SetPageDirty(page);
		unlock_page(page);
	}
	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages)
			return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

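	/*
	 * Map the ring into the user address space: userspace reads events
	 * straight out of this mapping, so struct aio_ring's layout is ABI.
	 */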
	down_write(&mm->mmap_sem);
	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_POPULATE, 0, &populate);
	if (IS_ERR((void *)ctx->mmap_base)) {
		up_write(&mm->mmap_sem);
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}
	up_write(&mm->mmap_sem);

	mm_populate(ctx->mmap_base, populate);

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
	ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
				       1, 0, ctx->ring_pages, NULL);
	for (i = 0; i < ctx->nr_pages; i++)
		put_page(ctx->ring_pages[i]);

	if (unlikely(ctx->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

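/*
 * Event slots per ring page: the first page loses sizeof(struct aio_ring)
 * bytes to the header, which is what AIO_EVENTS_OFFSET accounts for in
 * the index arithmetic below.
 */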
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
{
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->ctx_lock, flags);

	if (!req->ki_list.next)
		list_add(&req->ki_list, &ctx->active_reqs);

	req->ki_cancel = cancel;

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
{
	kiocb_cancel_fn *old, *cancel;

	/*
	 * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it
	 * actually has a cancel function, hence the cmpxchg()
	 */

	cancel = ACCESS_ONCE(kiocb->ki_cancel);
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;

		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb);
}

static void free_ioctx_rcu(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);

	free_percpu(ctx->cpu);
	kmem_cache_free(kioctx_cachep, ctx);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
	struct aio_ring *ring;
	struct kiocb *req;
	unsigned cpu, avail;
	DEFINE_WAIT(wait);

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(ctx, req);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	for_each_possible_cpu(cpu) {
		struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);

		atomic_add(kcpu->reqs_available, &ctx->reqs_available);
		kcpu->reqs_available = 0;
	}

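	/*
	 * Wait for all outstanding kiocbs to complete: reclaim event slots
	 * from the ring each time aio_complete() wakes us, until every
	 * request has been accounted for.
	 */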
	while (1) {
		prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);

		ring = kmap_atomic(ctx->ring_pages[0]);
		avail = (ring->head <= ring->tail)
			 ? ring->tail - ring->head
			 : ctx->nr_events - ring->head + ring->tail;

		atomic_add(avail, &ctx->reqs_available);
		ring->head = ring->tail;
		kunmap_atomic(ring);

		if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
			break;

		schedule();
	}
	finish_wait(&ctx->wait, &wait);

	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);

	aio_free_ring(ctx);

	pr_debug("freeing %p\n", ctx);

	/*
	 * Here the call_rcu() is between the wait loop above (for all
	 * outstanding kiocbs to complete) and freeing the ioctx.
	 *
	 * aio_complete() increments reqs_available, but it has to touch the
	 * ioctx after to issue a wakeup so we use rcu.
	 */
	call_rcu(&ctx->rcu_head, free_ioctx_rcu);
}

static void free_ioctx_ref(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);

	INIT_WORK(&ctx->free_work, free_ioctx);
	schedule_work(&ctx->free_work);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL))
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	if (percpu_ref_init(&ctx->users, free_ioctx_ref))
		goto out_freectx;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto out_freeref;

	if (aio_setup_ring(ctx) < 0)
		goto out_freepcpu;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	BUG_ON(!ctx->req_batch);

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freepcpu:
	free_percpu(ctx->cpu);
out_freeref:
	free_percpu(ctx->users.pcpu_count);
out_freectx:
	if (ctx->aio_ring_file)
		fput(ctx->aio_ring_file);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ioctx(struct kioctx *ctx)
{
	if (!atomic_xchg(&ctx->dead, 1)) {
		hlist_del_rcu(&ctx->list);
		/* percpu_ref_kill() will do the necessary call_rcu() */
		wake_up_all(&ctx->wait);

		/*
		 * It'd be more correct to do this in free_ioctx(), after all
		 * the outstanding kiocbs have finished - but by then io_destroy
		 * has already returned, so io_setup() could potentially return
		 * -EAGAIN with no ioctxs actually in use (as far as userspace
		 *  could tell).
		 */
		spin_lock(&aio_nr_lock);
		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
		aio_nr -= ctx->max_reqs;
		spin_unlock(&aio_nr_lock);

		if (ctx->mmap_size)
			vm_munmap(ctx->mmap_base, ctx->mmap_size);

		percpu_ref_kill(&ctx->users);
	}
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *req)
{
	while (!req->ki_ctx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (req->ki_ctx)
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return req->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;
	struct hlist_node *n;

	hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 */
		ctx->mmap_size = 0;

		kill_ioctx(ctx);
	}
}

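/*
 * reqs_available batching: each CPU keeps up to 2 * req_batch free slots
 * in its local cache so the submit and complete fast paths touch the
 * shared atomic only once per batch instead of once per request.
 */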
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	kcpu->reqs_available += nr;
	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	preempt_enable();
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;

	preempt_disable();
	kcpu = this_cpu_ptr(ctx->cpu);

	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	preempt_enable();
	return ret;
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
 */
static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req;

	if (!get_reqs_available(ctx))
		return NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}

static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	kmem_cache_free(kiocb_cachep, req);
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;

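	/*
	 * The mm's ioctx list is walked under RCU; taking a percpu_ref on
	 * a match keeps the kioctx alive after rcu_read_unlock().
	 */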
	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id) {
			percpu_ref_get(&ctx->users);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned long	flags;
	unsigned tail, pos;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		iocb->ki_user_data = res;
		smp_wmb();
		iocb->ki_ctx = ERR_PTR(-EXDEV);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	/*
	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
	 * need to issue a wakeup after incrementing reqs_available.
	 */
	rcu_read_lock();

	if (iocb->ki_list.next) {
		unsigned long flags;

		spin_lock_irqsave(&ctx->ctx_lock, flags);
		list_del(&iocb->ki_list);
		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	}

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

L
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

	/* everything turned out well, dispose of the aiocb. */
	kiocb_free(iocb);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	rcu_read_unlock();
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_events
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	mutex_lock(&ctx->ring_lock);

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

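	/*
	 * Copy events out in chunks, handling ring wrap-around: each pass
	 * is bounded both by the distance to the end of the ring and by
	 * the end of the current events page.
	 */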
	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ?  tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE -
			    ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE));

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li  h%u t%u\n", ret, head, tail);

	put_reqs_available(ctx, ret);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	ktime_t until = { .tv64 = KTIME_MAX };
	long ret = 0;

	if (timeout) {
		struct timespec	ts;

		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			return -EFAULT;

		until = timespec_to_ktime(ts);
	}

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
	 * the task state back to TASK_RUNNING.
	 *
	 * This should be ok, provided it doesn't flip the state back to
	 * TASK_RUNNING and return 0 too much - that causes us to spin. That
	 * will only happen if the mutex_lock() call blocks, and we then find
	 * the ringbuffer empty. So in practice we should be ok, but it's
	 * something to be aware of when touching this code.
	 */
	wait_event_interruptible_hrtimeout(ctx->wait,
			aio_read_events(ctx, min_nr, nr, event, &ret), until);

	if (!ret && signal_pending(current))
		ret = -EINTR;

	return ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting 
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail 
 *	with -EAGAIN if the specified nr_events exceeds the user's limit 
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
		         ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(ioctx);
		percpu_ref_put(&ioctx->users);
	}

out:
	return ret;
}

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding 
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		kill_ioctx(ioctx);
		percpu_ref_put(&ioctx->users);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
			    unsigned long, loff_t);

static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
				     int rw, char __user *buf,
				     unsigned long *nr_segs,
				     struct iovec **iovec,
				     bool compat)
{
	ssize_t ret;

	*nr_segs = kiocb->ki_nbytes;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(rw,
				(struct compat_iovec __user *)buf,
				*nr_segs, 1, *iovec, iovec);
	else
#endif
		ret = rw_copy_check_uvector(rw,
				(struct iovec __user *)buf,
				*nr_segs, 1, *iovec, iovec);
	if (ret < 0)
		return ret;

	/* ki_nbytes now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	return 0;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
				       int rw, char __user *buf,
				       unsigned long *nr_segs,
				       struct iovec *iovec)
{
	if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes)))
		return -EFAULT;

	iovec->iov_base = buf;
	iovec->iov_len = kiocb->ki_nbytes;
	*nr_segs = 1;
	return 0;
}

/*
 * aio_run_iocb:
 *	Performs the initial checks and dispatches the requested operation
 *	for the kiocb at the time of io submission.
 */
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
			    char __user *buf, bool compat)
{
	struct file *file = req->ki_filp;
	ssize_t ret;
	unsigned long nr_segs;
	int rw;
	fmode_t mode;
	aio_rw_op *rw_op;
	struct iovec inline_vec, *iovec = &inline_vec;

	switch (opcode) {
	case IOCB_CMD_PREAD:
	case IOCB_CMD_PREADV:
		mode	= FMODE_READ;
		rw	= READ;
		rw_op	= file->f_op->aio_read;
		goto rw_common;

	case IOCB_CMD_PWRITE:
	case IOCB_CMD_PWRITEV:
		mode	= FMODE_WRITE;
		rw	= WRITE;
		rw_op	= file->f_op->aio_write;
		goto rw_common;
rw_common:
		if (unlikely(!(file->f_mode & mode)))
			return -EBADF;

		if (!rw_op)
			return -EINVAL;

		ret = (opcode == IOCB_CMD_PREADV ||
		       opcode == IOCB_CMD_PWRITEV)
			? aio_setup_vectored_rw(req, rw, buf, &nr_segs,
						&iovec, compat)
			: aio_setup_single_vector(req, rw, buf, &nr_segs,
						  iovec);
		if (ret)
			return ret;

		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
		if (ret < 0) {
			if (iovec != &inline_vec)
				kfree(iovec);
			return ret;
		}

		req->ki_nbytes = ret;

		/* XXX: move/kill - rw_verify_area()? */
		/* This matches the pread()/pwrite() logic */
		if (req->ki_pos < 0) {
			ret = -EINVAL;
			break;
		}

		if (rw == WRITE)
			file_start_write(file);

		ret = rw_op(req, iovec, nr_segs, req->ki_pos);

		if (rw == WRITE)
			file_end_write(file);
		break;

	case IOCB_CMD_FDSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 1);
		break;

	case IOCB_CMD_FSYNC:
		if (!file->f_op->aio_fsync)
			return -EINVAL;

		ret = file->f_op->aio_fsync(req, 0);
		break;

	default:
		pr_debug("EINVAL: no operation provided\n");
		return -EINVAL;
	}

	if (iovec != &inline_vec)
		kfree(iovec);

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx);
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;
	req->ki_nbytes = iocb->aio_nbytes;

	ret = aio_run_iocb(req, iocb->aio_lio_opcode,
			   (char __user *)(unsigned long)iocb->aio_buf,
			   compat);
	if (ret)
		goto out_put_req;

	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	kiocb_free(req);
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

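	/*
	 * Plug the block layer so requests submitted by the loop below can
	 * be merged and dispatched in one batch.
	 */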
	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i=0; i<nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	percpu_ref_put(&ctx->users);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	if (key != KIOCB_KEY)
		return NULL;

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(ctx, kiocb);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/*
		 * The result argument is no longer used - the io_event is
		 * always delivered via the ring buffer. -EINPROGRESS indicates
		 * cancellation is in progress:
		 */
		ret = -EINPROGRESS;
	}

	percpu_ref_put(&ctx->users);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		percpu_ref_put(&ioctx->users);
	}
	return ret;
}