dm.c 74.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
M
Milan Broz 已提交
3
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
L
Linus Torvalds 已提交
4 5 6 7 8
 *
 * This file is released under the GPL.
 */

#include "dm.h"
M
Mike Anderson 已提交
9
#include "dm-uevent.h"
L
Linus Torvalds 已提交
10 11 12

#include <linux/init.h>
#include <linux/module.h>
A
Arjan van de Ven 已提交
13
#include <linux/mutex.h>
L
Linus Torvalds 已提交
14 15 16 17 18 19
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
D
Darrick J. Wong 已提交
20
#include <linux/hdreg.h>
21
#include <linux/delay.h>
22
#include <linux/wait.h>
23
#include <linux/kthread.h>
24 25

#include <trace/events/block.h>
L
Linus Torvalds 已提交
26

27 28
#define DM_MSG_PREFIX "core"

N
Namhyung Kim 已提交
29 30 31 32 33 34 35 36 37 38
#ifdef CONFIG_PRINTK
/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
#endif

M
Milan Broz 已提交
39 40 41 42 43 44 45
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

L
Linus Torvalds 已提交
46 47 48 49 50
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

51 52
static DEFINE_IDR(_minor_idr);

53
static DEFINE_SPINLOCK(_minor_lock);
M
Mikulas Patocka 已提交
54 55 56 57 58

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

59 60
static struct workqueue_struct *deferred_remove_workqueue;

L
Linus Torvalds 已提交
61
/*
K
Kiyoshi Ueda 已提交
62
 * For bio-based dm.
L
Linus Torvalds 已提交
63 64 65 66 67 68
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
R
Richard Kennedy 已提交
69
	struct bio *bio;
70
	unsigned long start_time;
71
	spinlock_t endio_lock;
M
Mikulas Patocka 已提交
72
	struct dm_stats_aux stats_aux;
L
Linus Torvalds 已提交
73 74
};

K
Kiyoshi Ueda 已提交
75 76 77 78 79 80 81
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
82
	struct request *orig, *clone;
83
	struct kthread_work work;
K
Kiyoshi Ueda 已提交
84 85 86 87 88
	int error;
	union map_info info;
};

/*
89 90 91 92 93 94
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
K
Kiyoshi Ueda 已提交
95 96 97
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
98
	struct dm_rq_target_io *tio;
99
	struct bio clone;
K
Kiyoshi Ueda 已提交
100 101
};

102 103 104 105 106 107 108 109
union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

110 111
#define MINOR_ALLOCED ((void *)-1)

L
Linus Torvalds 已提交
112 113 114
/*
 * Bits for the md->flags field.
 */
115
#define DMF_BLOCK_IO_FOR_SUSPEND 0
L
Linus Torvalds 已提交
116
#define DMF_SUSPENDED 1
117
#define DMF_FROZEN 2
J
Jeff Mahoney 已提交
118
#define DMF_FREEING 3
119
#define DMF_DELETING 4
120
#define DMF_NOFLUSH_SUSPENDING 5
121
#define DMF_MERGE_IS_OPTIONAL 6
M
Mikulas Patocka 已提交
122
#define DMF_DEFERRED_REMOVE 7
123
#define DMF_SUSPENDED_INTERNALLY 8
L
Linus Torvalds 已提交
124

M
Mikulas Patocka 已提交
125 126 127 128 129 130 131 132
/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};

133 134 135
/*
 * Work processed by per-device workqueue.
 */
L
Linus Torvalds 已提交
136
struct mapped_device {
M
Mikulas Patocka 已提交
137
	struct srcu_struct io_barrier;
138
	struct mutex suspend_lock;
L
Linus Torvalds 已提交
139
	atomic_t holders;
140
	atomic_t open_count;
L
Linus Torvalds 已提交
141

M
Mikulas Patocka 已提交
142 143 144 145 146
	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
147
	struct dm_table __rcu *map;
M
Mikulas Patocka 已提交
148

149 150 151
	struct list_head table_devices;
	struct mutex table_devices_lock;

L
Linus Torvalds 已提交
152 153
	unsigned long flags;

154
	struct request_queue *queue;
155
	unsigned type;
156
	/* Protect queue and type against concurrent access. */
157 158
	struct mutex type_lock;

159 160
	struct target_type *immutable_target_type;

L
Linus Torvalds 已提交
161
	struct gendisk *disk;
M
Mike Anderson 已提交
162
	char name[16];
L
Linus Torvalds 已提交
163 164 165 166 167 168

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
169
	atomic_t pending[2];
L
Linus Torvalds 已提交
170
	wait_queue_head_t wait;
171
	struct work_struct work;
K
Kiyoshi Ueda 已提交
172
	struct bio_list deferred;
173
	spinlock_t deferred_lock;
L
Linus Torvalds 已提交
174

175
	/*
176
	 * Processing queue (flush)
177 178 179
	 */
	struct workqueue_struct *wq;

L
Linus Torvalds 已提交
180 181 182 183
	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
184
	mempool_t *rq_pool;
L
Linus Torvalds 已提交
185

S
Stefan Bader 已提交
186 187
	struct bio_set *bs;

L
Linus Torvalds 已提交
188 189 190 191 192
	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
M
Mike Anderson 已提交
193 194 195
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */
L
Linus Torvalds 已提交
196 197 198 199 200

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
201
	struct block_device *bdev;
D
Darrick J. Wong 已提交
202 203 204

	/* forced geometry settings */
	struct hd_geometry geometry;
M
Milan Broz 已提交
205

206 207
	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;
208

209 210
	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;
M
Mikulas Patocka 已提交
211 212

	struct dm_stats stats;
213 214 215

	struct kthread_worker kworker;
	struct task_struct *kworker_task;
L
Linus Torvalds 已提交
216 217
};

K
Kiyoshi Ueda 已提交
218 219 220 221 222
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
223
	mempool_t *rq_pool;
K
Kiyoshi Ueda 已提交
224 225 226
	struct bio_set *bs;
};

227 228 229 230 231 232
struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

233 234
#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
235
#define RESERVED_MAX_IOS		1024
236
static struct kmem_cache *_io_cache;
K
Kiyoshi Ueda 已提交
237
static struct kmem_cache *_rq_tio_cache;
238
static struct kmem_cache *_rq_cache;
239

240 241 242 243 244
/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268
/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
				      unsigned def, unsigned max)
{
	unsigned ios = ACCESS_ONCE(*reserved_ios);
	unsigned modified_ios = 0;

	if (!ios)
		modified_ios = def;
	else if (ios > max)
		modified_ios = max;

	if (modified_ios) {
		(void)cmpxchg(reserved_ios, ios, modified_ios);
		ios = modified_ios;
	}

	return ios;
}

269 270 271 272 273 274 275
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

276 277 278 279 280 281 282
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

L
Linus Torvalds 已提交
283 284
static int __init local_init(void)
{
K
Kiyoshi Ueda 已提交
285
	int r = -ENOMEM;
L
Linus Torvalds 已提交
286 287

	/* allocate a slab for the dm_ios */
A
Alasdair G Kergon 已提交
288
	_io_cache = KMEM_CACHE(dm_io, 0);
L
Linus Torvalds 已提交
289
	if (!_io_cache)
K
Kiyoshi Ueda 已提交
290
		return r;
L
Linus Torvalds 已提交
291

K
Kiyoshi Ueda 已提交
292 293
	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
294
		goto out_free_io_cache;
K
Kiyoshi Ueda 已提交
295

296 297 298 299 300
	_rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

M
Mike Anderson 已提交
301
	r = dm_uevent_init();
K
Kiyoshi Ueda 已提交
302
	if (r)
303
		goto out_free_rq_cache;
M
Mike Anderson 已提交
304

305 306 307 308 309 310
	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

L
Linus Torvalds 已提交
311 312
	_major = major;
	r = register_blkdev(_major, _name);
K
Kiyoshi Ueda 已提交
313
	if (r < 0)
314
		goto out_free_workqueue;
L
Linus Torvalds 已提交
315 316 317 318 319

	if (!_major)
		_major = r;

	return 0;
K
Kiyoshi Ueda 已提交
320

321 322
out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
K
Kiyoshi Ueda 已提交
323 324
out_uevent_exit:
	dm_uevent_exit();
325 326
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
K
Kiyoshi Ueda 已提交
327 328
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
K
Kiyoshi Ueda 已提交
329 330 331 332
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
L
Linus Torvalds 已提交
333 334 335 336
}

static void local_exit(void)
{
M
Mikulas Patocka 已提交
337
	flush_scheduled_work();
338
	destroy_workqueue(deferred_remove_workqueue);
M
Mikulas Patocka 已提交
339

340
	kmem_cache_destroy(_rq_cache);
K
Kiyoshi Ueda 已提交
341
	kmem_cache_destroy(_rq_tio_cache);
L
Linus Torvalds 已提交
342
	kmem_cache_destroy(_io_cache);
343
	unregister_blkdev(_major, _name);
M
Mike Anderson 已提交
344
	dm_uevent_exit();
L
Linus Torvalds 已提交
345 346 347 348 349 350

	_major = 0;

	DMINFO("cleaned up");
}

351
static int (*_inits[])(void) __initdata = {
L
Linus Torvalds 已提交
352 353 354 355
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
M
Mikulas Patocka 已提交
356
	dm_io_init,
357
	dm_kcopyd_init,
L
Linus Torvalds 已提交
358
	dm_interface_init,
M
Mikulas Patocka 已提交
359
	dm_statistics_init,
L
Linus Torvalds 已提交
360 361
};

362
static void (*_exits[])(void) = {
L
Linus Torvalds 已提交
363 364 365 366
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
M
Mikulas Patocka 已提交
367
	dm_io_exit,
368
	dm_kcopyd_exit,
L
Linus Torvalds 已提交
369
	dm_interface_exit,
M
Mikulas Patocka 已提交
370
	dm_statistics_exit,
L
Linus Torvalds 已提交
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
400 401 402 403 404

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
L
Linus Torvalds 已提交
405 406 407 408 409
}

/*
 * Block device functions
 */
M
Mike Anderson 已提交
410 411 412 413 414
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

A
Al Viro 已提交
415
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
L
Linus Torvalds 已提交
416 417 418
{
	struct mapped_device *md;

J
Jeff Mahoney 已提交
419 420
	spin_lock(&_minor_lock);

A
Al Viro 已提交
421
	md = bdev->bd_disk->private_data;
J
Jeff Mahoney 已提交
422 423 424
	if (!md)
		goto out;

425
	if (test_bit(DMF_FREEING, &md->flags) ||
M
Mike Anderson 已提交
426
	    dm_deleting_md(md)) {
J
Jeff Mahoney 已提交
427 428 429 430
		md = NULL;
		goto out;
	}

L
Linus Torvalds 已提交
431
	dm_get(md);
432
	atomic_inc(&md->open_count);
J
Jeff Mahoney 已提交
433 434 435 436 437

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
L
Linus Torvalds 已提交
438 439
}

440
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
L
Linus Torvalds 已提交
441
{
A
Al Viro 已提交
442
	struct mapped_device *md = disk->private_data;
443

444 445
	spin_lock(&_minor_lock);

M
Mikulas Patocka 已提交
446 447
	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
448
		queue_work(deferred_remove_workqueue, &deferred_remove_work);
M
Mikulas Patocka 已提交
449

L
Linus Torvalds 已提交
450
	dm_put(md);
451 452

	spin_unlock(&_minor_lock);
L
Linus Torvalds 已提交
453 454
}

455 456 457 458 459 460 461 462
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
M
Mikulas Patocka 已提交
463
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
464 465 466 467 468
{
	int r = 0;

	spin_lock(&_minor_lock);

M
Mikulas Patocka 已提交
469
	if (dm_open_count(md)) {
470
		r = -EBUSY;
M
Mikulas Patocka 已提交
471 472 473 474
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
475 476 477 478 479 480 481 482
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

M
Mikulas Patocka 已提交
483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

M
Mikulas Patocka 已提交
504 505 506 507 508
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

509 510 511 512 513
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

M
Mikulas Patocka 已提交
514 515 516 517 518
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

D
Darrick J. Wong 已提交
519 520 521 522 523 524 525
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

A
Al Viro 已提交
526
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
527 528
			unsigned int cmd, unsigned long arg)
{
A
Al Viro 已提交
529
	struct mapped_device *md = bdev->bd_disk->private_data;
M
Mikulas Patocka 已提交
530
	int srcu_idx;
531
	struct dm_table *map;
532 533 534
	struct dm_target *tgt;
	int r = -ENOTTY;

535
retry:
M
Mikulas Patocka 已提交
536 537
	map = dm_get_live_table(md, &srcu_idx);

538 539 540 541 542 543 544 545
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
546 547
	if (!tgt->type->ioctl)
		goto out;
548

549
	if (dm_suspended_md(md)) {
550 551 552 553
		r = -EAGAIN;
		goto out;
	}

554
	r = tgt->type->ioctl(tgt, cmd, arg);
555 556

out:
M
Mikulas Patocka 已提交
557
	dm_put_live_table(md, srcu_idx);
558

559 560 561 562 563
	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

564 565 566
	return r;
}

A
Alasdair G Kergon 已提交
567
static struct dm_io *alloc_io(struct mapped_device *md)
L
Linus Torvalds 已提交
568 569 570 571
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

A
Alasdair G Kergon 已提交
572
static void free_io(struct mapped_device *md, struct dm_io *io)
L
Linus Torvalds 已提交
573 574 575 576
{
	mempool_free(io, md->io_pool);
}

A
Alasdair G Kergon 已提交
577
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
L
Linus Torvalds 已提交
578
{
579
	bio_put(&tio->clone);
L
Linus Torvalds 已提交
580 581
}

K
Kiyoshi Ueda 已提交
582 583
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
584
{
J
Jun'ichi Nomura 已提交
585
	return mempool_alloc(md->io_pool, gfp_mask);
586 587 588 589
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
J
Jun'ichi Nomura 已提交
590
	mempool_free(tio, tio->md->io_pool);
591 592
}

593 594 595 596 597 598 599 600 601 602 603
static struct request *alloc_clone_request(struct mapped_device *md,
					   gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

K
Kiyoshi Ueda 已提交
604 605 606 607 608 609
static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

610 611 612
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
M
Mikulas Patocka 已提交
613
	struct bio *bio = io->bio;
T
Tejun Heo 已提交
614
	int cpu;
M
Mikulas Patocka 已提交
615
	int rw = bio_data_dir(bio);
616 617 618

	io->start_time = jiffies;

T
Tejun Heo 已提交
619 620 621
	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
622 623
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		atomic_inc_return(&md->pending[rw]));
M
Mikulas Patocka 已提交
624 625

	if (unlikely(dm_stats_used(&md->stats)))
626
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
M
Mikulas Patocka 已提交
627
				    bio_sectors(bio), false, 0, &io->stats_aux);
628 629
}

630
static void end_io_acct(struct dm_io *io)
631 632 633 634
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
635
	int pending;
636 637
	int rw = bio_data_dir(bio);

638
	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
639

M
Mikulas Patocka 已提交
640
	if (unlikely(dm_stats_used(&md->stats)))
641
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
M
Mikulas Patocka 已提交
642 643
				    bio_sectors(bio), true, duration, &io->stats_aux);

644 645
	/*
	 * After this is decremented the bio must not be touched if it is
646
	 * a flush.
647
	 */
648 649
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
650
	pending += atomic_read(&md->pending[rw^0x1]);
651

652 653 654
	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
655 656
}

L
Linus Torvalds 已提交
657 658 659
/*
 * Add the bio to the list of deferred io.
 */
M
Mikulas Patocka 已提交
660
static void queue_io(struct mapped_device *md, struct bio *bio)
L
Linus Torvalds 已提交
661
{
662
	unsigned long flags;
L
Linus Torvalds 已提交
663

664
	spin_lock_irqsave(&md->deferred_lock, flags);
L
Linus Torvalds 已提交
665
	bio_list_add(&md->deferred, bio);
666
	spin_unlock_irqrestore(&md->deferred_lock, flags);
667
	queue_work(md->wq, &md->work);
L
Linus Torvalds 已提交
668 669 670 671 672
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
M
Mikulas Patocka 已提交
673
 * dm_put_live_table() when finished.
L
Linus Torvalds 已提交
674
 */
M
Mikulas Patocka 已提交
675
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
L
Linus Torvalds 已提交
676
{
M
Mikulas Patocka 已提交
677 678 679 680
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}
L
Linus Torvalds 已提交
681

M
Mikulas Patocka 已提交
682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}
L
Linus Torvalds 已提交
702

M
Mikulas Patocka 已提交
703 704 705
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
L
Linus Torvalds 已提交
706 707
}

708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}

D
Darrick J. Wong 已提交
822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

L
Linus Torvalds 已提交
849 850 851 852 853 854 855 856 857
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

858 859 860 861 862
static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

L
Linus Torvalds 已提交
863 864 865 866
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
867
static void dec_pending(struct dm_io *io, int error)
L
Linus Torvalds 已提交
868
{
869
	unsigned long flags;
870 871 872
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;
873 874

	/* Push-back supersedes any I/O errors */
875 876 877 878 879 880
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}
L
Linus Torvalds 已提交
881 882

	if (atomic_dec_and_test(&io->io_count)) {
883 884 885 886
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
887
			spin_lock_irqsave(&md->deferred_lock, flags);
888 889 890
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
891 892
				/* noflush suspend was interrupted. */
				io->error = -EIO;
893
			spin_unlock_irqrestore(&md->deferred_lock, flags);
894 895
		}

896 897
		io_error = io->error;
		bio = io->bio;
898 899 900 901 902
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;
903

904
		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
905
			/*
906 907
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
908
			 */
909 910
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
911
		} else {
912
			/* done with normal IO or empty flush */
913
			trace_block_bio_complete(md->queue, bio, io_error);
914
			bio_endio(bio, io_error);
915
		}
L
Linus Torvalds 已提交
916 917 918
	}
}

919 920 921 922 923 924 925 926
static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

927
static void clone_endio(struct bio *bio, int error)
L
Linus Torvalds 已提交
928
{
929
	int r = error;
M
Mikulas Patocka 已提交
930
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
931
	struct dm_io *io = tio->io;
S
Stefan Bader 已提交
932
	struct mapped_device *md = tio->io->md;
L
Linus Torvalds 已提交
933 934 935 936 937 938
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
M
Mikulas Patocka 已提交
939
		r = endio(tio->ti, bio, error);
940 941 942 943 944
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
L
Linus Torvalds 已提交
945
			error = r;
946 947
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
948
			return;
949 950 951 952
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
L
Linus Torvalds 已提交
953 954
	}

955 956 957 958
	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

S
Stefan Bader 已提交
959
	free_tio(md, tio);
960
	dec_pending(io, error);
L
Linus Torvalds 已提交
961 962
}

963 964 965 966 967
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
M
Mikulas Patocka 已提交
968 969
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
970 971
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
972
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
1019
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
1020
{
1021
	atomic_dec(&md->pending[rw]);
1022 1023

	/* nudge anyone waiting on suspend queue */
1024
	if (!md_in_flight(md))
1025 1026
		wake_up(&md->wait);

1027 1028 1029 1030 1031 1032
	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
1033
	if (run_queue)
1034
		blk_run_queue_async(md->queue);
1035 1036 1037 1038 1039 1040 1041

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

1042 1043 1044 1045 1046
static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
1047
	free_clone_request(tio->md, clone);
1048 1049 1050
	free_rq_tio(tio);
}

K
Kiyoshi Ueda 已提交
1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061
/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

1062
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
K
Kiyoshi Ueda 已提交
1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
1076 1077
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
K
Kiyoshi Ueda 已提交
1078 1079
}

1080 1081 1082 1083 1084 1085 1086
static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

1087
	free_rq_clone(clone);
1088 1089 1090 1091 1092
}

/*
 * Requeue the original request of a clone.
 */
1093
static void dm_requeue_unmapped_request(struct request *clone)
1094
{
1095
	int rw = rq_data_dir(clone);
1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

1108
	rq_completed(md, rw, 0);
1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139
}

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

1140
static void dm_done(struct request *clone, int error, bool mapped)
1141
{
1142
	int r = error;
1143
	struct dm_rq_target_io *tio = clone->end_io_data;
1144
	dm_request_endio_fn rq_end_io = NULL;
1145

1146 1147 1148 1149 1150 1151
	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}
1152

1153 1154 1155 1156
	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

1157
	if (r <= 0)
1158
		/* The target wants to complete the I/O */
1159 1160
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
1161 1162
		/* The target will handle the I/O */
		return;
1163
	else if (r == DM_ENDIO_REQUEUE)
1164 1165 1166
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
1167
		DMWARN("unimplemented target endio return value: %d", r);
1168 1169 1170 1171
		BUG();
	}
}

1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
1207
static void dm_kill_unmapped_request(struct request *clone, int error)
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
}

1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
L
Linus Torvalds 已提交
1252
{
1253
	sector_t len = max_io_len_target_boundary(sector, ti);
1254
	sector_t offset, max_len;
L
Linus Torvalds 已提交
1255 1256

	/*
1257
	 * Does the target need to split even further?
L
Linus Torvalds 已提交
1258
	 */
1259 1260 1261 1262 1263 1264 1265 1266 1267 1268
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
L
Linus Torvalds 已提交
1269 1270 1271 1272 1273
	}

	return len;
}

1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

A
Alasdair G Kergon 已提交
1329
static void __map_bio(struct dm_target_io *tio)
L
Linus Torvalds 已提交
1330 1331
{
	int r;
1332
	sector_t sector;
S
Stefan Bader 已提交
1333
	struct mapped_device *md;
1334
	struct bio *clone = &tio->clone;
A
Alasdair G Kergon 已提交
1335
	struct dm_target *ti = tio->ti;
L
Linus Torvalds 已提交
1336 1337 1338 1339 1340 1341 1342 1343 1344

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
1345
	sector = clone->bi_iter.bi_sector;
M
Mikulas Patocka 已提交
1346
	r = ti->type->map(ti, clone);
1347
	if (r == DM_MAPIO_REMAPPED) {
L
Linus Torvalds 已提交
1348
		/* the bio has been remapped so dispatch it */
1349

1350 1351
		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);
1352

L
Linus Torvalds 已提交
1353
		generic_make_request(clone);
1354 1355
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
S
Stefan Bader 已提交
1356 1357 1358
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
1359 1360 1361
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
L
Linus Torvalds 已提交
1362 1363 1364 1365 1366 1367 1368 1369 1370
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
1371
	unsigned sector_count;
L
Linus Torvalds 已提交
1372 1373
};

1374
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
A
Alasdair G Kergon 已提交
1375
{
1376 1377
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
L
Linus Torvalds 已提交
1378 1379 1380 1381 1382
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
1383
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1384
		      sector_t sector, unsigned len)
L
Linus Torvalds 已提交
1385
{
1386
	struct bio *clone = &tio->clone;
L
Linus Torvalds 已提交
1387

1388 1389 1390 1391
	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio))
		bio_integrity_clone(clone, bio, GFP_NOIO);
A
Alasdair G Kergon 已提交
1392

1393 1394 1395 1396 1397
	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone, 0, len);
L
Linus Torvalds 已提交
1398 1399
}

1400
static struct dm_target_io *alloc_tio(struct clone_info *ci,
1401
				      struct dm_target *ti,
1402
				      unsigned target_bio_nr)
1403
{
1404 1405 1406
	struct dm_target_io *tio;
	struct bio *clone;

1407
	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
1408
	tio = container_of(clone, struct dm_target_io, clone);
1409 1410 1411

	tio->io = ci->io;
	tio->ti = ti;
1412
	tio->target_bio_nr = target_bio_nr;
1413 1414 1415 1416

	return tio;
}

1417 1418
static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
1419
				       unsigned target_bio_nr, unsigned *len)
1420
{
1421
	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
1422
	struct bio *clone = &tio->clone;
1423

1424 1425
	tio->len_ptr = len;

1426
	__bio_clone_fast(clone, ci->bio);
A
Alasdair G Kergon 已提交
1427
	if (len)
1428
		bio_setup_sector(clone, ci->sector, *len);
1429

A
Alasdair G Kergon 已提交
1430
	__map_bio(tio);
1431 1432
}

1433
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1434
				  unsigned num_bios, unsigned *len)
1435
{
1436
	unsigned target_bio_nr;
1437

1438
	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1439
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1440 1441
}

1442
static int __send_empty_flush(struct clone_info *ci)
1443
{
1444
	unsigned target_nr = 0;
1445 1446
	struct dm_target *ti;

1447
	BUG_ON(bio_has_data(ci->bio));
1448
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1449
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1450 1451 1452 1453

	return 0;
}

A
Alasdair G Kergon 已提交
1454
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1455
				     sector_t sector, unsigned *len)
M
Mike Snitzer 已提交
1456
{
1457
	struct bio *bio = ci->bio;
M
Mike Snitzer 已提交
1458
	struct dm_target_io *tio;
1459 1460
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;
M
Mike Snitzer 已提交
1461

1462 1463 1464 1465 1466
	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);
A
Alasdair G Kergon 已提交
1467

1468
	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1469
		tio = alloc_tio(ci, ti, target_bio_nr);
1470 1471
		tio->len_ptr = len;
		clone_bio(tio, bio, sector, *len);
1472 1473
		__map_bio(tio);
	}
M
Mike Snitzer 已提交
1474 1475
}

1476
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
M
Mike Snitzer 已提交
1477

1478
static unsigned get_num_discard_bios(struct dm_target *ti)
M
Mike Snitzer 已提交
1479
{
1480
	return ti->num_discard_bios;
M
Mike Snitzer 已提交
1481 1482
}

1483
static unsigned get_num_write_same_bios(struct dm_target *ti)
M
Mike Snitzer 已提交
1484
{
1485
	return ti->num_write_same_bios;
M
Mike Snitzer 已提交
1486 1487 1488 1489 1490 1491
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
1492
	return ti->split_discard_bios;
M
Mike Snitzer 已提交
1493 1494
}

1495 1496 1497
static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
M
Mike Snitzer 已提交
1498 1499
{
	struct dm_target *ti;
1500
	unsigned len;
1501
	unsigned num_bios;
M
Mike Snitzer 已提交
1502

1503 1504 1505 1506
	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;
M
Mike Snitzer 已提交
1507 1508

		/*
M
Mike Snitzer 已提交
1509 1510
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
M
Mike Snitzer 已提交
1511
		 * reconfiguration might also have changed that since the
1512
		 * check was performed.
M
Mike Snitzer 已提交
1513
		 */
1514 1515
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
1516
			return -EOPNOTSUPP;
M
Mike Snitzer 已提交
1517

M
Mike Snitzer 已提交
1518
		if (is_split_required && !is_split_required(ti))
1519
			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1520
		else
1521
			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1522

1523
		__send_duplicate_bios(ci, ti, num_bios, &len);
1524 1525 1526

		ci->sector += len;
	} while (ci->sector_count -= len);
M
Mike Snitzer 已提交
1527 1528 1529 1530

	return 0;
}

1531
static int __send_discard(struct clone_info *ci)
M
Mike Snitzer 已提交
1532
{
1533 1534
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
M
Mike Snitzer 已提交
1535 1536
}

1537
static int __send_write_same(struct clone_info *ci)
M
Mike Snitzer 已提交
1538
{
1539
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
M
Mike Snitzer 已提交
1540 1541
}

A
Alasdair G Kergon 已提交
1542 1543 1544
/*
 * Select the correct strategy for processing a non-flush bio.
 */
1545
static int __split_and_process_non_flush(struct clone_info *ci)
L
Linus Torvalds 已提交
1546
{
1547
	struct bio *bio = ci->bio;
1548
	struct dm_target *ti;
1549
	unsigned len;
L
Linus Torvalds 已提交
1550

M
Mike Snitzer 已提交
1551
	if (unlikely(bio->bi_rw & REQ_DISCARD))
1552
		return __send_discard(ci);
M
Mike Snitzer 已提交
1553
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1554
		return __send_write_same(ci);
M
Mike Snitzer 已提交
1555

1556 1557 1558 1559
	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

1560
	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
L
Linus Torvalds 已提交
1561

1562
	__clone_and_map_data_bio(ci, ti, ci->sector, &len);
L
Linus Torvalds 已提交
1563

1564 1565
	ci->sector += len;
	ci->sector_count -= len;
L
Linus Torvalds 已提交
1566

1567
	return 0;
L
Linus Torvalds 已提交
1568 1569 1570
}

/*
1571
 * Entry point to split a bio into clones and submit them to the targets.
L
Linus Torvalds 已提交
1572
 */
M
Mikulas Patocka 已提交
1573 1574
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
L
Linus Torvalds 已提交
1575 1576
{
	struct clone_info ci;
1577
	int error = 0;
L
Linus Torvalds 已提交
1578

M
Mikulas Patocka 已提交
1579
	if (unlikely(!map)) {
1580
		bio_io_error(bio);
1581 1582
		return;
	}
1583

M
Mikulas Patocka 已提交
1584
	ci.map = map;
L
Linus Torvalds 已提交
1585 1586 1587 1588 1589 1590
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
1591
	spin_lock_init(&ci.io->endio_lock);
1592
	ci.sector = bio->bi_iter.bi_sector;
L
Linus Torvalds 已提交
1593

1594
	start_io_acct(ci.io);
A
Alasdair G Kergon 已提交
1595

1596 1597 1598
	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
1599
		error = __send_empty_flush(&ci);
1600 1601
		/* dec_pending submits any data associated with flush */
	} else {
1602
		ci.bio = bio;
1603
		ci.sector_count = bio_sectors(bio);
1604
		while (ci.sector_count && !error)
1605
			error = __split_and_process_non_flush(&ci);
1606
	}
L
Linus Torvalds 已提交
1607 1608

	/* drop the extra reference count */
1609
	dec_pending(ci.io, error);
L
Linus Torvalds 已提交
1610 1611 1612 1613 1614
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

M
Milan Broz 已提交
1615 1616 1617 1618 1619
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
M
Mikulas Patocka 已提交
1620
	struct dm_table *map = dm_get_live_table_fast(md);
M
Milan Broz 已提交
1621 1622
	struct dm_target *ti;
	sector_t max_sectors;
1623
	int max_size = 0;
M
Milan Broz 已提交
1624 1625

	if (unlikely(!map))
1626
		goto out;
M
Milan Broz 已提交
1627 1628

	ti = dm_table_find_target(map, bvm->bi_sector);
1629
	if (!dm_target_is_valid(ti))
M
Mikulas Patocka 已提交
1630
		goto out;
M
Milan Broz 已提交
1631 1632 1633 1634

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
1635
	max_sectors = min(max_io_len(bvm->bi_sector, ti),
1636
			  (sector_t) queue_max_sectors(q));
M
Milan Broz 已提交
1637
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1638
	if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
M
Milan Broz 已提交
1639 1640 1641 1642 1643 1644 1645 1646 1647
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
1648 1649
	/*
	 * If the target doesn't support merge method and some of the devices
1650 1651 1652 1653
	 * provided their merge_bvec method (we know this by looking for the
	 * max_hw_sectors that dm_set_device_limits may set), then we can't
	 * allow bios with multiple vector entries.  So always set max_size
	 * to 0, and the code below allows just one page.
1654 1655 1656
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;
M
Milan Broz 已提交
1657

1658
out:
M
Mikulas Patocka 已提交
1659
	dm_put_live_table_fast(md);
M
Milan Broz 已提交
1660 1661 1662 1663 1664 1665 1666 1667 1668
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

L
Linus Torvalds 已提交
1669 1670 1671 1672
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
1673
static void _dm_request(struct request_queue *q, struct bio *bio)
L
Linus Torvalds 已提交
1674
{
1675
	int rw = bio_data_dir(bio);
L
Linus Torvalds 已提交
1676
	struct mapped_device *md = q->queuedata;
M
Mikulas Patocka 已提交
1677 1678
	int srcu_idx;
	struct dm_table *map;
L
Linus Torvalds 已提交
1679

M
Mikulas Patocka 已提交
1680
	map = dm_get_live_table(md, &srcu_idx);
L
Linus Torvalds 已提交
1681

1682
	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
1683

1684 1685
	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
M
Mikulas Patocka 已提交
1686
		dm_put_live_table(md, srcu_idx);
L
Linus Torvalds 已提交
1687

1688 1689 1690
		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
A
Alasdair G Kergon 已提交
1691
			bio_io_error(bio);
1692
		return;
L
Linus Torvalds 已提交
1693 1694
	}

M
Mikulas Patocka 已提交
1695 1696
	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
1697
	return;
1698 1699
}

M
Mikulas Patocka 已提交
1700
int dm_request_based(struct mapped_device *md)
1701 1702 1703 1704
{
	return blk_queue_stackable(md->queue);
}

1705
static void dm_request(struct request_queue *q, struct bio *bio)
1706 1707 1708 1709
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
1710 1711 1712
		blk_queue_bio(q, bio);
	else
		_dm_request(q, bio);
1713 1714
}

1715
static void dm_dispatch_request(struct request *rq)
1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
1732 1733
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);
1734 1735 1736 1737 1738 1739 1740 1741 1742

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
1743
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
1744
{
1745
	int r;
1746

1747
	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
1748 1749 1750
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;
1751

1752 1753 1754
	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
1755 1756 1757
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

1758 1759
	tio->clone = clone;

1760 1761 1762
	return 0;
}

1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780
static struct request *__clone_rq(struct request *rq, struct mapped_device *md,
				  struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	struct request *clone = alloc_clone_request(md, gfp_mask);

	if (!clone)
		return NULL;

	blk_rq_init(NULL, clone);
	if (setup_clone(clone, rq, tio, gfp_mask)) {
		/* -ENOMEM */
		free_clone_request(md, clone);
		return NULL;
	}

	return clone;
}

1781 1782
static void map_tio_request(struct kthread_work *work);

K
Kiyoshi Ueda 已提交
1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
1795
	tio->clone = NULL;
K
Kiyoshi Ueda 已提交
1796 1797 1798
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));
1799
	init_kthread_work(&tio->work, map_tio_request);
K
Kiyoshi Ueda 已提交
1800

1801 1802
	clone = __clone_rq(rq, md, tio, GFP_ATOMIC);
	if (!clone) {
K
Kiyoshi Ueda 已提交
1803 1804 1805 1806 1807 1808 1809
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}

1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

K
Kiyoshi Ueda 已提交
1823 1824
	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
1825 1826 1827 1828 1829 1830 1831 1832
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

1833 1834 1835 1836 1837 1838 1839
/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}
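
/*
 * Illustrative sketch of the ->map_rq contract that map_request()
 * expects.  "example_map_rq" and "example_ctx" are hypothetical; only
 * the DM_MAPIO_* return codes come from this file, and the clone->q
 * assignment mirrors what request-based targets like multipath do:
 *
 *	static int example_map_rq(struct dm_target *ti, struct request *clone,
 *				  union map_info *map_context)
 *	{
 *		struct example_ctx *c = ti->private;
 *
 *		if (!c->dev)
 *			return -EIO;              (clone is killed)
 *		if (example_path_busy(c))
 *			return DM_MAPIO_REQUEUE;  (clone is requeued)
 *		clone->q = bdev_get_queue(c->dev->bdev);
 *		return DM_MAPIO_REMAPPED;         (clone is dispatched)
 *	}
 */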

static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	map_request(tio->ti, tio->clone, tio->md);
}

static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
{
	struct request *clone;

	blk_start_request(orig);
	clone = orig->special;
	atomic_inc(&md->pending[rq_data_dir(clone)]);

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	return clone;
}

/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	struct dm_target *ti;
	struct request *rq, *clone;
	struct dm_rq_target_io *tio;
	sector_t pos;

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto delay_and_out;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		ti = dm_table_find_target(map, pos);
		if (!dm_target_is_valid(ti)) {
			/*
			 * Must perform the setup that dm_done() requires
			 * before calling dm_kill_unmapped_request().
			 */
			DMERR_LIMIT("request attempted access beyond the end of device");
			clone = dm_start_request(md, rq);
			dm_kill_unmapped_request(clone, -EIO);
			continue;
		}

		if (ti->type->busy && ti->type->busy(ti))
			goto delay_and_out;

		clone = dm_start_request(md, rq);

		tio = rq->special;
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		queue_kthread_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}

	goto out;

delay_and_out:
	blk_delay_queue(q, HZ / 10);
out:
	dm_put_live_table(md, srcu_idx);
}

int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table_fast(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_put_live_table_fast(md);

	return r;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table_fast(md);
		if (map) {
			/*
			 * Request-based dm cares only about its own queue
			 * when queried for the congestion status of the
			 * request_queue.
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);
		}
		dm_put_live_table_fast(md);
	}
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}
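
/*
 * Note on the two allocators above: idr_alloc() is called under
 * _minor_lock with GFP_NOWAIT, so it can only draw from the per-cpu
 * reserve that idr_preload(GFP_KERNEL) filled beforehand
 * (idr_preload_end() re-enables preemption).  The slot is claimed with
 * the MINOR_ALLOCED sentinel first and only pointed at the real
 * mapped_device by idr_replace() once alloc_dev() has finished.
 */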

static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);
	md->kworker_task = NULL;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	cleanup_srcu_struct(&md->io_barrier);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);

	if (md->kworker_task)
		kthread_stop(md->kworker_task);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->rq_pool)
		mempool_destroy(md->rq_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	cleanup_srcu_struct(&md->io_barrier);
	free_table_devices(&md->table_devices);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	dm_stats_cleanup(&md->stats);
	module_put(THIS_MODULE);
	kfree(md);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->io_pool && md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
			/*
			 * There's no need to reload with request-based dm
			 * because the size of front_pad doesn't change.
			 * Note for future: If you are to reload bioset,
			 * prep-ed requests in the queue may refer
			 * to bio from the old bioset, so you must walk
			 * through the queue to unprep.
			 */
		}
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->rq_pool = p->rq_pool;
	p->rq_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, the table no longer needs any mempools */
	dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 *
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
 */
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)
		return 0;

	if (q->make_request_fn == dm_request) {
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
			return 0;
	}

	return 1;
}

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);
}

/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	int merge_is_optional;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet if the old table type wasn't
	 * request-based during suspension, so stop it now to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	if (old_map)
		dm_sync_table(md);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	/* Also initialize the request-based DM worker thread */
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}
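
/*
 * Illustrative ordering of the type handshake above as driven from the
 * ioctl layer on first table load (a sketch, not a copy of dm-ioctl.c):
 *
 *	dm_lock_md_type(md);
 *	if (dm_get_md_type(md) == DM_TYPE_NONE)
 *		dm_set_md_type(md, dm_table_get_type(t));
 *	r = dm_setup_md_queue(md);        (no-op for bio-based devices)
 *	dm_unlock_md_type(md);
 */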

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
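
/*
 * Illustrative pairing for the reference counting above (hypothetical
 * caller; dm_get_md() takes the reference itself):
 *
 *	struct mapped_device *md = dm_get_md(dev);    (+1 holder)
 *	if (!md)
 *		return -ENXIO;
 *	... use md ...
 *	dm_put(md);                                   (-1 holder)
 *
 * __dm_destroy() waits for md->holders to reach zero, so a leaked
 * reference stalls device removal.
 */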

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md, &srcu_idx);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (dm_request_based(md))
		flush_kthread_worker(&md->kworker);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);

	/*
	 * Rare, but there may be I/O requests still going to complete, so
	 * wait for all references to disappear.  No one should increment
	 * the reference count of the mapped_device after its state becomes
	 * DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
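
/*
 * Illustrative caller sequence for dm_swap_table(), roughly what the
 * ioctl resume path does (error handling trimmed; a sketch only):
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);   (quiesce first)
 *	old_map = dm_swap_table(md, new_map);
 *	r = dm_resume(md);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);            (caller must destroy)
 */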

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 *
 * Caller must hold md->suspend_lock
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, int interruptible)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		stop_queue(md->queue);
		flush_kthread_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, interruptible);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
	if (r)
		goto out_unlock;

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
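
/*
 * The two suspend_flags bits select independent behaviours:
 *
 *	DM_SUSPEND_LOCKFS_FLAG   freeze any filesystem on the device via
 *	                         lock_fs() so dirty data is flushed first
 *	DM_SUSPEND_NOFLUSH_FLAG  don't wait for in-flight I/O; targets
 *	                         that check dm_noflush_suspending() may
 *	                         requeue it to be reissued after resume
 *
 * As the __dm_suspend() comment notes, noflush takes precedence over
 * lockfs, so passing both behaves like noflush alone.
 */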

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);

	r = 0;
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	if (dm_suspended_internally_md(md))
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);

	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	if (!dm_suspended_internally_md(md))
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
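
/*
 * Typical pairing (illustrative; in-kernel users such as dm-thin are
 * assumed here, the pairing itself is what matters):
 *
 *	dm_internal_suspend_noflush(md);
 *	... operate on the device with no I/O in flight ...
 *	dm_internal_resume(md);
 *
 * Nesting is tracked via DMF_SUSPENDED_INTERNALLY, and a concurrent
 * userspace dm_suspend()/dm_resume() waits on that bit.
 */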

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
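
/*
 * The fast variants also come in pairs, but deliberately keep
 * md->suspend_lock held in between (illustrative):
 *
 *	dm_internal_suspend_fast(md);     (takes suspend_lock, flushes I/O)
 *	... short critical section ...
 *	dm_internal_resume_fast(md);      (drops suspend_lock)
 *
 * Holding the lock across the pair is what locks out a concurrent
 * userspace-driven suspend, per the comment above.
 */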

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
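
/*
 * Illustrative result: for cookie 0xdeadbeef the CHANGE/REMOVE uevent
 * carries the environment string
 *
 *	DM_COOKIE=3735928559
 *
 * which udev and libdevmapper use to match the uevent back to the
 * ioctl that caused it.  A zero cookie degrades to a plain uevent.
 */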

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
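
/*
 * Illustrative polling loop over the two helpers above (hypothetical
 * kernel-side caller; userspace reaches the same semantics through the
 * DM_DEV_WAIT ioctl):
 *
 *	int ev = dm_get_event_nr(md);
 *	for (;;) {
 *		... inspect device state ...
 *		if (dm_wait_event(md, ev))
 *			break;            (interrupted by a signal)
 *		ev = dm_get_event_nr(md);
 *	}
 */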

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep;
	unsigned int pool_size;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	if (type == DM_TYPE_BIO_BASED) {
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
	} else if (type == DM_TYPE_REQUEST_BASED) {
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used. See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
	} else
		goto out;

	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
	if (!pools->io_pool)
		goto out;

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
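
/*
 * Illustrative layout behind the front_pad arithmetic above: bios from
 * pools->bs are embedded at a fixed offset inside a larger object, so
 * container_of(bio, ..., clone) recovers the context without a lookup.
 * For DM_TYPE_BIO_BASED:
 *
 *	[ per-bio data (rounded up) ][ dm_target_io up to .clone ][ bio ]
 *	|<--------------------- front_pad --------------------->|
 *
 * For DM_TYPE_REQUEST_BASED only offsetof(struct dm_rq_clone_bio_info,
 * clone) is needed, which is why per_bio_data_size must be zero there.
 */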

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->rq_pool)
		mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");