/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
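
/*
 * Illustrative client lifecycle (editor's sketch, not part of the original
 * file; "my_client" and the page count are assumptions for the example):
 *
 *	struct dm_io_client *my_client = dm_io_client_create(16);
 *
 *	if (IS_ERR(my_client))
 *		return PTR_ERR(my_client);
 *	... issue io via dm_io() with io_req.client = my_client ...
 *	dm_io_client_destroy(my_client);
 */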

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
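
/*
 * Worked example of the packing above (editor's illustration): on a 64-bit
 * build DM_IO_MAX_REGIONS is 64, so every 'struct io' is 64-byte aligned and
 * its six low bits are zero.  Storing region 5 for an io at 0x...9c0 yields
 * bi_private == 0x...9c5; masking with ~(64 - 1) and (64 - 1) recovers the
 * pointer and the region.
 */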

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
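
/*
 * Consumers walk the pages roughly as do_region() does below (editor's
 * sketch of the calling pattern, not a separate API):
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... use up to 'len' bytes of 'page' starting at 'offset' ...
 *	dp->next_page(dp);
 */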

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a write barrier and we
	 * need to send a zero-sized barrier.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC | REQ_UNPLUG;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_HARDBARRIER))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
		rw &= ~REQ_HARDBARRIER;
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
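
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * synchronous read of one region into a kernel buffer.  "client", "bdev",
 * "buf" and the sector count are assumptions for the example; notify.fn is
 * left NULL so the request takes the sync_io() path above.
 *
 *	struct dm_io_region region = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn    = NULL,
 *		.client       = client,
 *	};
 *	unsigned long error_bits;
 *
 *	return dm_io(&io_req, 1, &region, &error_bits);
 */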

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}