/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

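/*
 * Pass an ioctl through to the device's single target, provided the
 * table has exactly one target and the device is not suspended.
 */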
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

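/*
 * Record the start time of an io and account it in the disk statistics
 * and the per-device pending counter.
 */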
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

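/*
 * Account a completed io and wake up anyone waiting for the device to
 * quiesce (e.g. during suspend) once nothing is left in flight.
 */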
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	up_write(&md->io_lock);
	return 0;		/* deferred successfully */
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
526
	if (error && !(io->error > 0 && __noflush_suspending(md)))
L

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		end_io_acct(io);

		io_error = io->error;
		bio = io->bio;

		free_io(md, io);

		if (io_error != DM_ENDIO_REQUEUE) {
			trace_block_bio_complete(md->queue, bio);

			bio_endio(bio, io_error);
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

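/*
 * Hand a clone over to the target's map function, then either dispatch
 * it, clean it up on error/requeue, or leave it to the target.
 */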
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

L
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

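/*
 * Map the io at the current position in the clone_info: clone all the
 * remaining sectors in one go, clone as many whole bvecs as fit in the
 * target, or split a single bvec across targets.
 */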
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		bio_io_error(bio);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

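/*
 * Tell the block layer how many bytes it may add to a bio at this offset
 * without forcing dm to split it, consulting the target's merge function
 * if it has one.
 */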
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

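/*
 * Report congestion by asking the current table; while io is blocked for
 * a suspend the device is simply reported as congested.
 */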
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

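/*
 * Wait until all in-flight ios have completed.  Returns -EINTR if a
 * signal arrives while waiting interruptibly.
 */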
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

next_bio:
	spin_lock_irq(&md->deferred_lock);
	c = bio_list_pop(&md->deferred);
	spin_unlock_irq(&md->deferred_lock);

	if (c) {
		__split_and_process_bio(md, c);
		goto next_bio;
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);

	up_write(&md->io_lock);
}

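/*
 * Kick the per-device workqueue to process the deferred bios and wait
 * for it to finish.
 */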
static void dm_queue_flush(struct mapped_device *md)
{
	queue_work(md->wq, &md->work);
	flush_workqueue(md->wq);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	up_write(&md->io_lock);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

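/*
 * Resume a suspended device: resume the targets, replay any deferred io,
 * unlock the filesystem and clear the suspended state.
 */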
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");