/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

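/*
 * Read a module parameter and clamp it to [min, max]; if the stored value
 * was out of range it is also corrected (best effort, via cmpxchg).
 */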
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);

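/*
 * Report zones across all targets of the live table; each target's zones
 * are remapped into the mapped device's sector space by dm_report_zones_cb().
 */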
static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args, nr_zones);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones		NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

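/*
 * Allocate a dm_io for an original bio.  The dm_io, its embedded
 * dm_target_io and the first clone bio all come from a single front-padded
 * allocation out of the mapped device's io_bs bio_set.
 */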
static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}

static bool md_in_flight(struct mapped_device *md)
{
	if (queue_is_mq(md->queue))
		return blk_mq_queue_inflight(md->queue);
	else
		return md_in_flight_bios(md);
}

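/* Start time (in nanoseconds) of the original bio that this clone belongs to. */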
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	bio_end_io_acct(bio, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	int srcu_idx;
	bool ret;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 *  here, something is wrong.
		 */
		dm_put_live_table(md, srcu_idx);
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

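/*
 * Hand a clone bio to its target's ->map() method and act on the result:
 * submit the remapped bio, requeue it, or fail the io.
 */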
static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct dm_target *ti = tio->ti;
	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&io->io_count);
	sector = clone->bi_iter.bi_sector;

	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(io->orig_bio), sector);
		ret = submit_bio_noacct(clone);
		break;
	case DM_MAPIO_KILL:
		free_tio(tio);
		dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		free_tio(tio);
		dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return ret;
}

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	bio_crypt_clone(clone, bio, GFP_NOIO);

	if (bio_integrity(bio)) {
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone);

	return 0;
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct dm_target_io *tio;
	int try;

	if (!num_bios)
		return;

	if (num_bios == 1) {
		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
		bio_list_add(blist, &tio->clone);
		return;
	}

	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}

static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	return __map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		(void) __clone_and_map_simple_bio(ci, tio, len);
	}
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	/*
	 * Empty flush uses a statically initialized bio, as the base for
	 * cloning.  However, blkg association requires that a bdev is
	 * associated with a gendisk, which doesn't happen until the bdev is
	 * opened.  So, blkg association is done at issue time of the flush
	 * rather than when the device is created in alloc_dev().
	 */
	bio_set_dev(ci->bio, ci->io->md->bdev);

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
	return 0;
}

static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	int r;

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
	(void) __map_bio(tio);

	return 0;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
	return ti->num_secure_erase_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		return -EOPNOTSUPP;

	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

	__send_duplicate_bios(ci, ti, num_bios, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}

static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}

static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}

static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}

static bool is_abnormal_io(struct bio *bio)
{
	bool r = false;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		r = true;
		break;
	}

	return r;
}

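/*
 * Handle bios that need target-specific duplication (discard, secure erase,
 * write same, write zeroes); returns true if the bio was handled here.
 */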
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	struct bio *bio = ci->bio;

	if (bio_op(bio) == REQ_OP_DISCARD)
		*result = __send_discard(ci, ti);
	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		*result = __send_secure_erase(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
		*result = __send_write_same(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
		*result = __send_write_zeroes(ci, ti);
	else
		return false;

	return true;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->sector = bio->bi_iter.bi_sector;
}

#define __dm_part_stat_sub(part, field, subnd)	\
	(part_stat_get(part, field) -= (subnd))

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		bio_uninit(ci.bio);
		/* dec_pending submits any data associated with flush */
	} else if (op_is_zone_mgmt(bio_op(bio))) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error) {
			error = __split_and_process_non_flush(&ci);
			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * Remainder must be passed to submit_bio_noacct()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio to be used by end_io_acct() and
				 * for dec_pending to use for completion handling.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split)
				 */
				part_stat_lock();
				__dm_part_stat_sub(&dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

				bio_chain(b, bio);
				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
				ret = submit_bio_noacct(bio);
				break;
			}
		}
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

/*
 * Optimized variant of __split_and_process_bio that leverages the
 * fact that targets that use it do _not_ have a need to split bios.
 */
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
			      struct bio *bio, struct dm_target *ti)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		bio_uninit(ci.bio);
		/* dec_pending submits any data associated with flush */
	} else {
		struct dm_target_io *tio;

		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		if (__process_abnormal_io(&ci, ti, &error))
			goto out;

		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
	}
out:
	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
{
	unsigned len, sector_count;

	sector_count = bio_sectors(*bio);
	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);

	if (sector_count > len) {
		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);

		bio_chain(split, *bio);
		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;
	}
}

static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	if (!ti) {
		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
		if (unlikely(!ti)) {
			bio_io_error(bio);
			return ret;
		}
	}

	/*
	 * If in ->queue_bio we need to use blk_queue_split(), otherwise
	 * queue_limits for abnormal requests (e.g. discard, writesame, etc)
	 * won't be imposed.
	 */
	if (current->bio_list) {
		if (is_abnormal_io(bio))
			blk_queue_split(&bio);
		else
			dm_queue_split(md, ti, &bio);
	}

	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		return __process_bio(md, map, bio, ti);
	else
		return __split_and_process_bio(md, map, bio);
}

static blk_qc_t dm_submit_bio(struct bio *bio)
{
	struct mapped_device *md = bio->bi_disk->private_data;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;
	struct dm_table *map;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		/*
		 * We are called with a live reference on q_usage_counter, but
		 * that one will be released as soon as we return.  Grab an
		 * extra one as blk_mq_submit_bio expects to be able to consume
		 * a reference (which lives until the request is freed in case a
		 * request is allocated).
		 */
		percpu_ref_get(&bio->bi_disk->queue->q_usage_counter);
		return blk_mq_submit_bio(bio);
	}

	map = dm_get_live_table(md, &srcu_idx);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return ret;
	}

	ret = dm_process_bio(md, map, bio);

	dm_put_live_table(md, srcu_idx);
	return ret;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		if (dm_request_based(md)) {
			/*
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
			 */
			struct backing_dev_info *bdi = md->queue->backing_dev_info;
			r = bdi->wb.congested->state & bdi_bits;
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
				r = dm_table_any_congested(map, bdi_bits);
			dm_put_live_table_fast(md);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
L
Linus Torvalds 已提交
1882
{
T
Tejun Heo 已提交
1883
	int r;
J
Jeff Mahoney 已提交
1884

T
Tejun Heo 已提交
1885
	idr_preload(GFP_KERNEL);
1886
	spin_lock(&_minor_lock);
L
Linus Torvalds 已提交
1887

T
Tejun Heo 已提交
1888
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
L
Linus Torvalds 已提交
1889

1890
	spin_unlock(&_minor_lock);
T
Tejun Heo 已提交
1891 1892 1893 1894 1895
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
L
Linus Torvalds 已提交
1896 1897
}

static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

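/*
 * Tear down whatever alloc_dev() managed to set up.  Every member is
 * checked before it is released, so this is safe to call on a partially
 * constructed mapped_device (the alloc_dev() error path relies on that).
 */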
static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);

	dm_mq_cleanup_mapped_device(md);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * default to bio-based until DM table is loaded and md->type
	 * established. If request-based table is loaded: blk-mq will
	 * override accordingly.
	 */
	md->queue = blk_alloc_queue(numa_node_id);
	if (!md->queue)
		goto bad;

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		md->dax_dev = alloc_dax(md, md->disk->disk_name,
					&dm_dax_ops, 0);
		if (IS_ERR(md->dax_dev))
			goto bad;
	}

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

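/*
 * Bio-based clones carry per-target data in the bioset front_pad, laid out
 * as [per-bio data][struct dm_target_io, whose last member is the clone
 * bio], so a table with a different per_io_data_size needs freshly sized
 * biosets.  See dm_alloc_md_mempools() below for the exact arithmetic.
 */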
static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

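/*
 * __bind() is the point where a loaded table becomes live: it resizes the
 * device, quiesces the queue for request-based tables, swaps the
 * RCU-published md->map pointer and then applies the new queue_limits.
 * Callers hold md->suspend_lock and must destroy the returned old table.
 */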
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
		/*
		 * Leverage the fact that request-based DM targets and
		 * NVMe bio based targets are immutable singletons
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
		 *   and __process_bio.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

static void dm_init_congested_fn(struct mapped_device *md)
{
	md->queue->backing_dev_info->congested_data = md;
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	struct queue_limits limits;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	dm_table_set_restrictions(t, md->queue, &limits);
	blk_register_queue(md->disk);

	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

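/*
 * Sleep on md->wait until md_in_flight() reports no outstanding I/O.
 * With TASK_INTERRUPTIBLE a pending signal aborts the wait and the caller
 * sees -EINTR; internal suspend passes TASK_UNINTERRUPTIBLE instead.
 */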
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			(void) submit_bio_noacct(c);
		else
			(void) dm_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

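/*
 * Re-enable bio submission after a finished (or failed) suspend: clear the
 * flag that dm_submit_bio() and dm_wq_work() test, then kick the workqueue
 * so bios parked on md->deferred are reissued.  The barrier keeps the
 * clear_bit visible before the work item runs.
 */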
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

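/*
 * Illustrative caller sequence (approximate; the real flow lives in the
 * ioctl path, see drivers/md/dm-ioctl.c):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old))
 *		dm_table_destroy(old);
 *	dm_resume(md);
 */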
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

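/*
 * Quiescing order implemented below: presuspend targets, optionally freeze
 * the filesystem (lock_fs), block new bios and synchronize SRCU readers,
 * stop the request queue and flush the deferred-work queue, then wait for
 * all in-flight I/O before marking the device suspended.
 */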
/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		DMDEBUG("%s: suspending with flush", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

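/*
 * Resume mirrors __dm_suspend() in reverse: targets are resumed first so
 * that bios flushed from md->deferred (via dm_queue_flush) can be mapped,
 * then the request queue is restarted and the filesystem is thawed.
 */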
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
M
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
2890
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
M
Mikulas Patocka 已提交
2891

2892
void dm_internal_resume_fast(struct mapped_device *md)
M
Mikulas Patocka 已提交
2893
{
2894
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
M
Mikulas Patocka 已提交
2895 2896 2897 2898 2899 2900 2901
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
2902
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
M
Mikulas Patocka 已提交
2903

L
 * Event notification.
 *---------------------------------------------------------------*/
2907
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
M
Milan Broz 已提交
2908
		       unsigned cookie)
2909
{
M
Milan Broz 已提交
2910 2911 2912 2913
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
2914
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
M
Milan Broz 已提交
2915 2916 2917
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
2918 2919
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
M
Milan Broz 已提交
2920
	}
2921 2922
}

M
{
	return atomic_add_return(1, &md->uevent_seq);
}

L
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

M
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

L
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
2956
EXPORT_SYMBOL_GPL(dm_disk);
L
M
struct kobject *dm_kobject(struct mapped_device *md)
{
2960
	return &md->kobj_holder.kobj;
M
Milan Broz 已提交
2961 2962 2963 2964 2965 2966
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

2967
	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
M
Milan Broz 已提交
2968

2969 2970 2971 2972 2973
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
M
Milan Broz 已提交
2974
	dm_get(md);
2975 2976 2977
out:
	spin_unlock(&_minor_lock);

M
Milan Broz 已提交
2978 2979 2980
	return md;
}

2981
int dm_suspended_md(struct mapped_device *md)
L
Linus Torvalds 已提交
2982 2983 2984 2985
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

2986 2987 2988 2989 2990
int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

M
Mikulas Patocka 已提交
2991 2992 2993 2994 2995
int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

2996 2997
int dm_suspended(struct dm_target *ti)
{
2998
	return dm_suspended_md(dm_table_get_md(ti->table));
2999 3000 3001
}
EXPORT_SYMBOL_GPL(dm_suspended);

3002 3003
int dm_noflush_suspending(struct dm_target *ti)
{
3004
	return __noflush_suspending(dm_table_get_md(ti->table));
3005 3006 3007
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

3008
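/*
 * Worked example of the bio-based front_pad arithmetic below (illustrative,
 * not taken from a real build): with per_io_data_size == 0 the clone bioset
 * front_pad collapses to offsetof(struct dm_target_io, clone), and
 * io_front_pad additionally reserves room for the enclosing struct dm_io,
 * which is what lets dm_per_bio_data() step back from the bio to either one.
 */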
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
3009 3010
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
K
Kiyoshi Ueda 已提交
3011
{
3012
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
3013
	unsigned int pool_size = 0;
3014
	unsigned int front_pad, io_front_pad;
3015
	int ret;
K
Kiyoshi Ueda 已提交
3016 3017

	if (!pools)
3018
		return NULL;
K
Kiyoshi Ueda 已提交
3019

3020 3021
	switch (type) {
	case DM_TYPE_BIO_BASED:
3022
	case DM_TYPE_DAX_BIO_BASED:
3023
	case DM_TYPE_NVME_BIO_BASED:
3024
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
3025
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
3026
		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
3027 3028
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
3029
			goto out;
3030
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
3031
			goto out;
3032 3033
		break;
	case DM_TYPE_REQUEST_BASED:
3034
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
3035
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
3036
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
3037 3038 3039 3040 3041
		break;
	default:
		BUG();
	}

3042 3043
	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
J
Jun'ichi Nomura 已提交
3044
		goto out;
K
Kiyoshi Ueda 已提交
3045

3046
	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
J
Jun'ichi Nomura 已提交
3047
		goto out;
3048

K
Kiyoshi Ueda 已提交
3049
	return pools;
3050 3051 3052

out:
	dm_free_md_mempools(pools);
3053

3054
	return NULL;
K
Kiyoshi Ueda 已提交
3055 3056 3057 3058 3059 3060 3061
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

3062 3063
	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);
K
Kiyoshi Ueda 已提交
3064 3065 3066 3067

	kfree(pools);
}

3068 3069 3070 3071 3072 3073 3074 3075 3076
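/*
 * Persistent reservation plumbing: register/unregister fan out to every
 * underlying path via dm_call_pr() and the iterate_devices callout, while
 * reserve/release/preempt/clear go through dm_prepare_ioctl() to the single
 * device a one-target table maps to.
 */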
struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
3077 3078
{
	struct mapped_device *md = bdev->bd_disk->private_data;
3079 3080 3081
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;
3082

3083 3084 3085
	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;
3086

3087 3088 3089 3090
	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);
3091

3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137
	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
3138 3139 3140
}

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3141
			 u32 flags)
3142 3143 3144
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
3145
	int r, srcu_idx;
3146

3147
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3148
	if (r < 0)
3149
		goto out;
3150 3151 3152 3153 3154 3155

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
3156 3157
out:
	dm_unprepare_ioctl(md, srcu_idx);
3158 3159 3160 3161 3162 3163 3164
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
3165
	int r, srcu_idx;
3166

3167
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3168
	if (r < 0)
3169
		goto out;
3170 3171 3172 3173 3174 3175

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
3176 3177
out:
	dm_unprepare_ioctl(md, srcu_idx);
3178 3179 3180 3181
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3182
			 enum pr_type type, bool abort)
3183 3184 3185
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
3186
	int r, srcu_idx;
3187

3188
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3189
	if (r < 0)
3190
		goto out;
3191 3192 3193 3194 3195 3196

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
3197 3198
out:
	dm_unprepare_ioctl(md, srcu_idx);
3199 3200 3201 3202 3203 3204 3205
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
3206
	int r, srcu_idx;
3207

3208
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3209
	if (r < 0)
3210
		goto out;
3211 3212 3213 3214 3215 3216

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
3217 3218
out:
	dm_unprepare_ioctl(md, srcu_idx);
3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

3230
static const struct block_device_operations dm_blk_dops = {
3231
	.submit_bio = dm_submit_bio,
L
Linus Torvalds 已提交
3232 3233
	.open = dm_blk_open,
	.release = dm_blk_close,
3234
	.ioctl = dm_blk_ioctl,
D
Darrick J. Wong 已提交
3235
	.getgeo = dm_blk_getgeo,
3236
	.report_zones = dm_blk_report_zones,
3237
	.pr_ops = &dm_pr_ops,
L
Linus Torvalds 已提交
3238 3239 3240
	.owner = THIS_MODULE
};

3241 3242
static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
3243
	.dax_supported = dm_dax_supported,
3244
	.copy_from_iter = dm_dax_copy_from_iter,
3245
	.copy_to_iter = dm_dax_copy_to_iter,
3246
	.zero_page_range = dm_dax_zero_page_range,
3247 3248
};

L
3249 3250 3251 3252 3253 3254 3255 3256
/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
3257

3258 3259 3260
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

3261 3262 3263
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

L
3264 3265 3266
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");