/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 5191977
struct dm_io {
	unsigned magic;
	struct mapped_device *md;
	blk_status_t status;
	atomic_t io_count;
	struct bio *orig_bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
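
/*
 * Illustrative sketch (hypothetical target code, not taken from this file):
 * a bio-based target that sets ti->per_io_data_size in its ctr can recover
 * its per-bio state from the clone passed to its map/end_io methods, e.g.:
 *
 *	struct my_per_bio { unsigned long start_jiffies; };
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));
 *
 *		pb->start_jiffies = jiffies;
 *		bio_set_dev(bio, my_dev(ti)->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * my_per_bio, my_map and my_dev are assumptions used only for illustration.
 */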

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS		16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

#ifdef CONFIG_BLK_DEV_ZONED
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;
	return args->orig_cb(zone, args->zone_idx++, args->orig_data);
}
EXPORT_SYMBOL_GPL(dm_report_zones_cb);
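
/*
 * Illustrative sketch (assumption, not taken from this file): a zoned
 * target's ->report_zones method usually just forwards to the underlying
 * device and lets dm_report_zones_cb() remap each zone back into the
 * target's range, roughly:
 *
 *	static int my_report_zones(struct dm_target *ti,
 *			struct dm_report_zones_args *args, unsigned int nr_zones)
 *	{
 *		struct my_target *mt = ti->private;
 *
 *		return blkdev_report_zones(mt->dev->bdev,
 *					   my_map_sector(ti, args->next_sector),
 *					   nr_zones, dm_report_zones_cb, args);
 *	}
 *
 * my_target and my_map_sector are hypothetical names.
 */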

static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	int srcu_idx, ret;
	struct dm_report_zones_args args = {
		.next_sector = sector,
		.orig_data = data,
		.orig_cb = cb,
	};

	if (dm_suspended_md(md))
		return -EAGAIN;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return -EIO;

	do {
		struct dm_target *tgt;

		tgt = dm_table_find_target(map, args.next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones)) {
			ret = -EIO;
			goto out;
		}

		args.tgt = tgt;
		ret = tgt->type->report_zones(tgt, &args, nr_zones);
		if (ret < 0)
			goto out;
	} while (args.zone_idx < nr_zones &&
		 args.next_sector < get_capacity(disk));

	ret = args.zone_idx;
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}
#else
#define dm_blk_report_zones		NULL
#endif /* CONFIG_BLK_DEV_ZONED */

static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
	__acquires(md->io_barrier)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		return r;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = tgt->type->prepare_ioctl(tgt, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		msleep(10);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);

	return io;
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void free_tio(struct dm_target_io *tio)
{
	if (tio->inside_dm_io)
		return;
	bio_put(&tio->clone);
}

static bool md_in_flight_bios(struct mapped_device *md)
{
	int cpu;
	struct hd_struct *part = &dm_disk(md)->part0;
	long sum = 0;

	for_each_possible_cpu(cpu) {
		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	return sum != 0;
}

static bool md_in_flight(struct mapped_device *md)
{
	if (queue_is_mq(md->queue))
		return blk_mq_queue_inflight(md->queue);
	else
		return md_in_flight_bios(md);
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;

	return jiffies_to_nsecs(io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
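
/*
 * Illustrative sketch (hypothetical target code): the helper above lets a
 * target read back the original I/O's start time from any of its clones,
 * e.g. for latency bookkeeping in an end_io hook:
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     blk_status_t *error)
 *	{
 *		u64 start_ns = dm_start_time_ns_from_clone(bio);
 *
 *		my_update_latency_stats(ti->private, start_ns);
 *		return DM_ENDIO_DONE;
 *	}
 *
 * my_end_io and my_update_latency_stats are assumptions for illustration.
 */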

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;

	io->start_time = bio_start_io_acct(bio);
	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->orig_bio;
	unsigned long duration = jiffies - io->start_time;

	bio_end_io_acct(bio, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	put_dax(td->dm_dev.dax_dev);
	td->dm_dev.bdev = NULL;
	td->dm_dev.dax_dev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result)
{
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		refcount_set(&td->count, 1);
		list_add(&td->list, &md->table_devices);
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, refcount_read(&td->count));
		kfree(td);
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;
	blk_status_t io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
			io->status = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->status == BLK_STS_DM_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
			else
				/* noflush suspend was interrupted. */
				io->status = BLK_STS_IOERR;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->status;
		bio = io->orig_bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == BLK_STS_DM_REQUEUE)
			return;

		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			if (io_error)
				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_discard_sectors = 0;
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}

void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bio->bi_disk->queue->limits.max_discard_sectors)
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
			 !bio->bi_disk->queue->limits.max_write_same_sectors)
			disable_write_same(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
			disable_write_zeroes(md);
	}

	if (endio) {
		int r = endio(tio->ti, bio, &error);
		switch (r) {
		case DM_ENDIO_REQUEUE:
			error = BLK_STS_DM_REQUEUE;
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(tio);
	dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
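
/*
 * Illustrative sketch (hypothetical): a target constructor that must not see
 * I/O crossing its chunk boundary would cap the I/O size like this:
 *
 *	static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		int r;
 *		...
 *		r = dm_set_target_max_io_len(ti, chunk_sectors);
 *		if (r)
 *			return r;
 *		...
 *		return 0;
 *	}
 *
 * my_ctr and chunk_sectors are assumptions used only for illustration.
 */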

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	struct dm_table *map;
	int srcu_idx;
	bool ret;

	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		return false;

	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);

	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
				    void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 *  here, something is wrong.
		 */
		dm_put_live_table(md, srcu_idx);
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);

 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
 * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetic:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
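
/*
 * Illustrative sketch (hypothetical target code): a map method that can only
 * handle the start of a bio may truncate it and let DM resubmit the rest:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned max_sectors = my_sectors_to_boundary(ti, bio);
 *
 *		if (bio_sectors(bio) > max_sectors)
 *			dm_accept_partial_bio(bio, max_sectors);
 *		bio_set_dev(bio, my_dev(ti)->bdev);
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * my_map, my_sectors_to_boundary and my_dev are hypothetical names.
 */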

static blk_qc_t __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct bio *clone = &tio->clone;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	struct dm_target *ti = tio->ti;
	blk_qc_t ret = BLK_QC_T_NONE;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&io->io_count);
	sector = clone->bi_iter.bi_sector;

	r = ti->type->map(ti, clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
		/* the bio has been remapped so dispatch it */
		trace_block_bio_remap(clone->bi_disk->queue, clone,
				      bio_dev(io->orig_bio), sector);
		if (md->type == DM_TYPE_NVME_BIO_BASED)
			ret = direct_make_request(clone);
		else
			ret = generic_make_request(clone);
		break;
	case DM_MAPIO_KILL:
		free_tio(tio);
		dec_pending(io, BLK_STS_IOERR);
		break;
	case DM_MAPIO_REQUEUE:
		free_tio(tio);
		dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return ret;
}

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	bio_crypt_clone(clone, bio, GFP_NOIO);

	if (bio_integrity(bio)) {
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
		if (r < 0)
			return r;
	}

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone);

	return 0;
}

static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
{
	struct dm_target_io *tio;
	int try;

	if (!num_bios)
		return;

	if (num_bios == 1) {
		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
		bio_list_add(blist, &tio->clone);
		return;
	}

	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
}

static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
{
	struct bio *clone = &tio->clone;

	tio->len_ptr = len;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	return __map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);

	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
		(void) __clone_and_map_simple_bio(ci, tio, len);
	}
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	/*
	 * Empty flush uses a statically initialized bio, as the base for
	 * cloning.  However, blkg association requires that a bdev is
	 * associated with a gendisk, which doesn't happen until the bdev is
	 * opened.  So, blkg association is done at issue time of the flush
	 * rather than when the device is created in alloc_dev().
	 */
	bio_set_dev(ci->bio, ci->io->md->bdev);

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
	return 0;
}

static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				    sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	int r;

	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
	}
	(void) __map_bio(tio);

	return 0;
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
	return ti->num_secure_erase_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
				       unsigned num_bios)
{
	unsigned len;

	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	if (!num_bios)
		return -EOPNOTSUPP;

	len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));

	__send_duplicate_bios(ci, ti, num_bios, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
}

static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
}

static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
}

static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
}

static bool is_abnormal_io(struct bio *bio)
{
	bool r = false;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE_ZEROES:
		r = true;
		break;
	}

	return r;
}

static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	struct bio *bio = ci->bio;

	if (bio_op(bio) == REQ_OP_DISCARD)
		*result = __send_discard(ci, ti);
	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		*result = __send_secure_erase(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
		*result = __send_write_same(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
		*result = __send_write_zeroes(ci, ti);
	else
		return false;

	return true;
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct dm_target *ti;
	unsigned len;
	int r;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!ti)
		return -EIO;

	if (__process_abnormal_io(ci, ti, &r))
		return r;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->sector = bio->bi_iter.bi_sector;
}

#define __dm_part_stat_sub(part, field, subnd)	\
	(part_stat_get(part, field) -= (subnd))

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		bio_uninit(ci.bio);
		/* dec_pending submits any data associated with flush */
	} else if (op_is_zone_mgmt(bio_op(bio))) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error) {
			error = __split_and_process_non_flush(&ci);
			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * Remainder must be passed to generic_make_request()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
				 * ci.io->orig_bio to be used by end_io_acct() and
				 * for dec_pending to use for completion handling.
				 */
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
				ci.io->orig_bio = b;

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split)
				 */
				part_stat_lock();
				__dm_part_stat_sub(&dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

				bio_chain(b, bio);
				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
				ret = generic_make_request(bio);
				break;
			}
		}
	}

	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

/*
 * Optimized variant of __split_and_process_bio that leverages the
 * fact that targets that use it do _not_ have a need to split bios.
 */
static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
			      struct bio *bio, struct dm_target *ti)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct bio flush_bio;

		/*
		 * Use an on-stack bio for this, it's safe since we don't
		 * need to reference it after submit. It's just used as
		 * the basis for the clone(s).
		 */
		bio_init(&flush_bio, NULL, 0);
		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		ci.bio = &flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		bio_uninit(ci.bio);
		/* dec_pending submits any data associated with flush */
	} else {
		struct dm_target_io *tio;

		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		if (__process_abnormal_io(&ci, ti, &error))
			goto out;

		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
	}
out:
	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
{
	unsigned len, sector_count;

	sector_count = bio_sectors(*bio);
	len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);

	if (sector_count > len) {
		struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);

		bio_chain(split, *bio);
		trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}

static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	blk_qc_t ret = BLK_QC_T_NONE;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	if (!ti) {
		ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
		if (unlikely(!ti)) {
			bio_io_error(bio);
			return ret;
		}
	}

	/*
	 * If in ->make_request_fn we need to use blk_queue_split(), otherwise
	 * queue_limits for abnormal requests (e.g. discard, writesame, etc)
	 * won't be imposed.
	 */
	if (current->bio_list) {
		if (is_abnormal_io(bio))
			blk_queue_split(md->queue, &bio);
		else
			dm_queue_split(md, ti, &bio);
	}

	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		return __process_bio(md, map, bio, ti);
	else
		return __split_and_process_bio(md, map, bio);
}

static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = bio->bi_disk->private_data;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;
	struct dm_table *map;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		/*
		 * We are called with a live reference on q_usage_counter, but
		 * that one will be released as soon as we return.  Grab an
		 * extra one as blk_mq_make_request expects to be able to
		 * consume a reference (which lives until the request is freed
		 * in case a request is allocated).
		 */
		percpu_ref_get(&q->q_usage_counter);
		return blk_mq_make_request(q, bio);
	}

	map = dm_get_live_table(md, &srcu_idx);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return ret;
	}

	ret = dm_process_bio(md, map, bio);

	dm_put_live_table(md, srcu_idx);
	return ret;
}

L
{
1830 1831 1832
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;
L
1834
	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
M
1836
			/*
M
			 * top-level queue for congestion.
1839
			 */
1840 1841
			struct backing_dev_info *bdi = md->queue->backing_dev_info;
			r = bdi->wb.congested->state & bdi_bits;
M
			map = dm_get_live_table_fast(md);
			if (map)
1845
				r = dm_table_any_congested(map, bdi_bits);
M
1847 1848 1849
		}
	}

L
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
T
Tejun Heo 已提交
1879 1880 1881 1882
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
L
Linus Torvalds 已提交
1883 1884
}

1885
static int next_free_minor(int *minor)
L
Linus Torvalds 已提交
1886
{
T
Tejun Heo 已提交
1887
	int r;
J
Jeff Mahoney 已提交
1888

T
Tejun Heo 已提交
1889
	idr_preload(GFP_KERNEL);
1890
	spin_lock(&_minor_lock);
L
Linus Torvalds 已提交
1891

T
Tejun Heo 已提交
1892
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
L
Linus Torvalds 已提交
1893

1894
	spin_unlock(&_minor_lock);
T
Tejun Heo 已提交
1895 1896 1897 1898 1899
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
L
Linus Torvalds 已提交
1900 1901
}
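/*
 * Both helpers above use the two-step IDR pattern: idr_preload(GFP_KERNEL)
 * preallocates outside the spinlock, then idr_alloc() runs under
 * _minor_lock with GFP_NOWAIT.  MINOR_ALLOCED is inserted as a placeholder
 * and only replaced with the real mapped_device at the end of alloc_dev().
 */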

static const struct block_device_operations dm_blk_dops;
static const struct dax_operations dm_dax_ops;

static void dm_wq_work(struct work_struct *work);

static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);

	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}

	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);

	dm_mq_cleanup_mapped_device(md);
}
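/*
 * cleanup_mapped_device() is written so it can run on a partially
 * constructed device (most steps check whether the resource was ever
 * set up), which is what lets alloc_dev() below funnel nearly all of
 * its error paths through a single "bad:" label.
 */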

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	/*
	 * default to bio-based required ->make_request_fn until DM
	 * table is loaded and md->type established. If request-based
	 * table is loaded: blk-mq will override accordingly.
	 */
	md->queue = blk_alloc_queue(dm_make_request, numa_node_id);
	if (!md->queue)
		goto bad;

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		md->dax_dev = alloc_dax(md, md->disk->disk_name,
					&dm_dax_ops, 0);
		if (IS_ERR(md->dax_dev))
			goto bad;
	}

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}
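/*
 * The error labels above unwind in reverse order of setup: failures after
 * the SRCU barrier is initialised go through "bad:" (full
 * cleanup_mapped_device()), while earlier failures only undo the minor
 * allocation and the module reference they actually took.
 */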

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}
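/*
 * Only bio-based tables swap biosets here: their front_pad (and therefore
 * the per-bio data embedded in front of each clone) is sized from the new
 * table's targets in dm_alloc_md_mempools(), so a table reload can change
 * it.  For request-based tables the front_pad is fixed, so the existing
 * biosets are kept.
 */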

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
		/*
		 * Leverage the fact that request-based DM targets and
		 * NVMe bio based targets are immutable singletons
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
		 *   and __process_bio.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

static void dm_init_congested_fn(struct mapped_device *md)
{
	md->queue->backing_dev_info->congested_data = md;
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	struct queue_limits limits;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		dm_init_congested_fn(md);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	dm_table_set_restrictions(t, md->queue, &limits);
	blk_register_queue(md->disk);

	return 0;
}
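/*
 * dm_setup_md_queue() runs once the table type is finally known (it is
 * typically called with md->type_lock held, see dm_lock_md_type() above):
 * request-based types get a blk-mq queue from dm_mq_init_request_queue(),
 * bio-based types keep the queue allocated in alloc_dev(), and both end
 * up registered with limits calculated from the table.
 */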

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);
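/*
 * Illustrative sketch only (not a caller that exists in this file):
 * dm_hold() differs from dm_get() in that it refuses a device that is
 * already being freed, so a typical user pins the device briefly:
 *
 *	if (!dm_hold(md)) {
 *		... use md ...
 *		dm_put(md);
 *	}
 */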

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		set_bit(DMF_SUSPENDED, &md->flags);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			(void) generic_make_request(c);
		else
			(void) dm_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
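/*
 * The smp_mb__after_atomic() orders the clear_bit() against the
 * queue_work() that follows, so by the time dm_wq_work() runs and
 * re-tests DMF_BLOCK_IO_FOR_SUSPEND it observes the flag as cleared.
 */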

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		DMDEBUG("%s: suspending with flush", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		dm_stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
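/*
 * Sketch of the usual userspace-driven sequence (driven from dm-ioctl,
 * shown here only for orientation, not as an in-file caller):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);	/* quiesce, set DMF_SUSPENDED *​/
 *	old = dm_swap_table(md, new_table);	/* only legal while suspended *​/
 *	dm_resume(md);				/* replay md->deferred, unfreeze *​/
 */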

static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
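/*
 * Internal suspend nests: __dm_internal_suspend()/__dm_internal_resume()
 * keep a count in md->internal_suspend_count, and only the outermost pair
 * actually quiesces and restarts I/O.  The *_fast variants above skip the
 * target presuspend/postsuspend hooks and simply block and flush I/O while
 * keeping md->suspend_lock held between suspend and resume.
 */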

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

M
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}
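/*
 * front_pad/io_front_pad size the memory placed in front of each clone bio
 * allocated from these biosets, so the per-target and per-io bookkeeping
 * travels with the bio itself instead of needing a separate allocation.
 */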

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}
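/*
 * dm_pr_register() is the one persistent-reservation op that must touch
 * every underlying path, hence dm_call_pr() + iterate_devices.  On partial
 * failure of a new registration it re-runs itself with old_key == new_key
 * and new_key == 0, i.e. it unregisters the paths that did succeed.
 */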

static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.report_zones = dm_blk_report_zones,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.dax_supported = dm_dax_supported,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
	.zero_page_range = dm_dax_zero_page_range,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

L
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");