dm.c 73.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
M
Milan Broz 已提交
3
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
L
Linus Torvalds 已提交
4 5 6 7
 *
 * This file is released under the GPL.
 */

8 9
#include "dm-core.h"
#include "dm-rq.h"
M
Mike Anderson 已提交
10
#include "dm-uevent.h"
L
Linus Torvalds 已提交
11 12 13

#include <linux/init.h>
#include <linux/module.h>
A
Arjan van de Ven 已提交
14
#include <linux/mutex.h>
15
#include <linux/sched/signal.h>
L
Linus Torvalds 已提交
16 17 18
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
19
#include <linux/dax.h>
L
Linus Torvalds 已提交
20 21
#include <linux/slab.h>
#include <linux/idr.h>
22
#include <linux/uio.h>
D
Darrick J. Wong 已提交
23
#include <linux/hdreg.h>
24
#include <linux/delay.h>
25
#include <linux/wait.h>
26
#include <linux/pr.h>
27
#include <linux/refcount.h>
28

29 30
#define DM_MSG_PREFIX "core"

M
Milan Broz 已提交
31 32 33 34 35 36 37
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

L
Linus Torvalds 已提交
38 39 40 41 42
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

43 44
static DEFINE_IDR(_minor_idr);

45
static DEFINE_SPINLOCK(_minor_lock);
M
Mikulas Patocka 已提交
46 47 48 49 50

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

51 52
static struct workqueue_struct *deferred_remove_workqueue;

53 54 55
atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

56 57 58 59 60 61
void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

L
Linus Torvalds 已提交
62
/*
63
 * One of these is allocated (on-stack) per original bio.
L
Linus Torvalds 已提交
64
 */
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 7282014
struct dm_target_io {
	unsigned magic;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	bool inside_dm_io;
	struct bio clone;
};

L
Linus Torvalds 已提交
87
/*
88
 * One of these is allocated per original bio.
89
 * It contains the first clone used for that original.
L
Linus Torvalds 已提交
90
 */
91
#define DM_IO_MAGIC 5191977
L
Linus Torvalds 已提交
92
struct dm_io {
93
	unsigned magic;
L
Linus Torvalds 已提交
94
	struct mapped_device *md;
95
	blk_status_t status;
L
Linus Torvalds 已提交
96
	atomic_t io_count;
97
	struct bio *orig_bio;
98
	unsigned long start_time;
99
	spinlock_t endio_lock;
M
Mikulas Patocka 已提交
100
	struct dm_stats_aux stats_aux;
101 102
	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
L
Linus Torvalds 已提交
103 104
};

105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	if (!tio->inside_dm_io)
		return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
	return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);
	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

130 131
#define MINOR_ALLOCED ((void *)-1)

L
Linus Torvalds 已提交
132 133 134
/*
 * Bits for the md->flags field.
 */
135
#define DMF_BLOCK_IO_FOR_SUSPEND 0
L
Linus Torvalds 已提交
136
#define DMF_SUSPENDED 1
137
#define DMF_FROZEN 2
J
Jeff Mahoney 已提交
138
#define DMF_FREEING 3
139
#define DMF_DELETING 4
140
#define DMF_NOFLUSH_SUSPENDING 5
141 142
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
L
Linus Torvalds 已提交
143

144 145
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
146

K
Kiyoshi Ueda 已提交
147 148 149 150
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
151 152
	struct bio_set bs;
	struct bio_set io_bs;
K
Kiyoshi Ueda 已提交
153 154
};

155 156
struct table_device {
	struct list_head list;
157
	refcount_t count;
158 159 160
	struct dm_dev dm_dev;
};

K
Kiyoshi Ueda 已提交
161
static struct kmem_cache *_rq_tio_cache;
162
static struct kmem_cache *_rq_cache;
163

164 165 166
/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
167
#define RESERVED_BIO_BASED_IOS		16
168 169
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

170 171
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
172
	int param = READ_ONCE(*module_param);
173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

191 192
unsigned __dm_get_module_param(unsigned *module_param,
			       unsigned def, unsigned max)
193
{
194
	unsigned param = READ_ONCE(*module_param);
195
	unsigned modified_param = 0;
196

197 198 199 200
	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;
201

202 203 204
	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
205 206
	}

207
	return param;
208 209
}

210 211
unsigned dm_get_reserved_bio_based_ios(void)
{
212
	return __dm_get_module_param(&reserved_bio_based_ios,
213
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
214 215 216
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

217 218 219 220 221 222
static unsigned dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

L
Linus Torvalds 已提交
223 224
static int __init local_init(void)
{
K
Kiyoshi Ueda 已提交
225
	int r = -ENOMEM;
L
Linus Torvalds 已提交
226

K
Kiyoshi Ueda 已提交
227 228
	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
229
		return r;
K
Kiyoshi Ueda 已提交
230

231
	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
232 233 234 235
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

M
Mike Anderson 已提交
236
	r = dm_uevent_init();
K
Kiyoshi Ueda 已提交
237
	if (r)
238
		goto out_free_rq_cache;
M
Mike Anderson 已提交
239

240 241 242 243 244 245
	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

L
Linus Torvalds 已提交
246 247
	_major = major;
	r = register_blkdev(_major, _name);
K
Kiyoshi Ueda 已提交
248
	if (r < 0)
249
		goto out_free_workqueue;
L
Linus Torvalds 已提交
250 251 252 253 254

	if (!_major)
		_major = r;

	return 0;
K
Kiyoshi Ueda 已提交
255

256 257
out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
K
Kiyoshi Ueda 已提交
258 259
out_uevent_exit:
	dm_uevent_exit();
260 261
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
K
Kiyoshi Ueda 已提交
262 263
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
K
Kiyoshi Ueda 已提交
264 265

	return r;
L
Linus Torvalds 已提交
266 267 268 269
}

static void local_exit(void)
{
M
Mikulas Patocka 已提交
270
	flush_scheduled_work();
271
	destroy_workqueue(deferred_remove_workqueue);
M
Mikulas Patocka 已提交
272

273
	kmem_cache_destroy(_rq_cache);
K
Kiyoshi Ueda 已提交
274
	kmem_cache_destroy(_rq_tio_cache);
275
	unregister_blkdev(_major, _name);
M
Mike Anderson 已提交
276
	dm_uevent_exit();
L
Linus Torvalds 已提交
277 278 279 280 281 282

	_major = 0;

	DMINFO("cleaned up");
}

283
static int (*_inits[])(void) __initdata = {
L
Linus Torvalds 已提交
284 285 286 287
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
M
Mikulas Patocka 已提交
288
	dm_io_init,
289
	dm_kcopyd_init,
L
Linus Torvalds 已提交
290
	dm_interface_init,
M
Mikulas Patocka 已提交
291
	dm_statistics_init,
L
Linus Torvalds 已提交
292 293
};

294
static void (*_exits[])(void) = {
L
Linus Torvalds 已提交
295 296 297 298
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
M
Mikulas Patocka 已提交
299
	dm_io_exit,
300
	dm_kcopyd_exit,
L
Linus Torvalds 已提交
301
	dm_interface_exit,
M
Mikulas Patocka 已提交
302
	dm_statistics_exit,
L
Linus Torvalds 已提交
303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
332 333 334 335 336

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
L
Linus Torvalds 已提交
337 338 339 340 341
}

/*
 * Block device functions
 */
M
Mike Anderson 已提交
342 343 344 345 346
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

A
Al Viro 已提交
347
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
L
Linus Torvalds 已提交
348 349 350
{
	struct mapped_device *md;

J
Jeff Mahoney 已提交
351 352
	spin_lock(&_minor_lock);

A
Al Viro 已提交
353
	md = bdev->bd_disk->private_data;
J
Jeff Mahoney 已提交
354 355 356
	if (!md)
		goto out;

357
	if (test_bit(DMF_FREEING, &md->flags) ||
M
Mike Anderson 已提交
358
	    dm_deleting_md(md)) {
J
Jeff Mahoney 已提交
359 360 361 362
		md = NULL;
		goto out;
	}

L
Linus Torvalds 已提交
363
	dm_get(md);
364
	atomic_inc(&md->open_count);
J
Jeff Mahoney 已提交
365 366 367 368
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
L
Linus Torvalds 已提交
369 370
}

371
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
L
Linus Torvalds 已提交
372
{
373
	struct mapped_device *md;
374

375 376
	spin_lock(&_minor_lock);

377 378 379 380
	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

M
Mikulas Patocka 已提交
381 382
	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
383
		queue_work(deferred_remove_workqueue, &deferred_remove_work);
M
Mikulas Patocka 已提交
384

L
Linus Torvalds 已提交
385
	dm_put(md);
386
out:
387
	spin_unlock(&_minor_lock);
L
Linus Torvalds 已提交
388 389
}

390 391 392 393 394 395 396 397
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
M
Mikulas Patocka 已提交
398
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
399 400 401 402 403
{
	int r = 0;

	spin_lock(&_minor_lock);

M
Mikulas Patocka 已提交
404
	if (dm_open_count(md)) {
405
		r = -EBUSY;
M
Mikulas Patocka 已提交
406 407 408 409
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
410 411 412 413 414 415 416 417
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

M
Mikulas Patocka 已提交
418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438
int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

M
Mikulas Patocka 已提交
439 440 441 442 443
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

444 445 446 447 448
struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

M
Mikulas Patocka 已提交
449 450 451 452 453
struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

D
Darrick J. Wong 已提交
454 455 456 457 458 459 460
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

461
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
462
			    struct block_device **bdev)
463
	__acquires(md->io_barrier)
464
{
465
	struct dm_target *tgt;
466
	struct dm_table *map;
467
	int r;
468

469
retry:
C
Christoph Hellwig 已提交
470
	r = -ENOTTY;
471
	map = dm_get_live_table(md, srcu_idx);
472
	if (!map || !dm_table_get_size(map))
473
		return r;
474 475 476

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
477
		return r;
478

479 480
	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
481
		return r;
482

483 484
	if (dm_suspended_md(md))
		return -EAGAIN;
485

486
	r = tgt->type->prepare_ioctl(tgt, bdev);
487
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
488
		dm_put_live_table(md, *srcu_idx);
489 490 491
		msleep(10);
		goto retry;
	}
492

C
Christoph Hellwig 已提交
493 494 495
	return r;
}

496 497 498 499 500 501
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
	__releases(md->io_barrier)
{
	dm_put_live_table(md, srcu_idx);
}

C
Christoph Hellwig 已提交
502 503 504 505
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
506
	int r, srcu_idx;
C
Christoph Hellwig 已提交
507

508
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
C
Christoph Hellwig 已提交
509
	if (r < 0)
510
		goto out;
511

C
Christoph Hellwig 已提交
512 513
	if (r > 0) {
		/*
514 515
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
C
Christoph Hellwig 已提交
516
		 */
517 518 519 520 521
		if (!capable(CAP_SYS_RAWIO)) {
			DMWARN_LIMIT(
	"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
C
Christoph Hellwig 已提交
522
			goto out;
523
		}
C
Christoph Hellwig 已提交
524
	}
525

526
	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
C
Christoph Hellwig 已提交
527
out:
528
	dm_unprepare_ioctl(md, srcu_idx);
529 530 531
	return r;
}

532 533 534
static void start_io_acct(struct dm_io *io);

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
L
Linus Torvalds 已提交
535
{
536 537 538 539
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

540
	clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs);
541 542 543 544 545 546 547 548 549
	if (!clone)
		return NULL;

	tio = container_of(clone, struct dm_target_io, clone);
	tio->inside_dm_io = true;
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
550 551 552 553 554 555 556
	io->status = 0;
	atomic_set(&io->io_count, 1);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->endio_lock);

	start_io_acct(io);
557 558

	return io;
L
Linus Torvalds 已提交
559 560
}

A
Alasdair G Kergon 已提交
561
static void free_io(struct mapped_device *md, struct dm_io *io)
L
Linus Torvalds 已提交
562
{
563 564 565 566 567 568 569 570 571 572 573 574
	bio_put(&io->tio.clone);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
				      unsigned target_bio_nr, gfp_t gfp_mask)
{
	struct dm_target_io *tio;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
	} else {
575
		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
576 577 578 579 580 581 582 583 584 585 586 587 588
		if (!clone)
			return NULL;

		tio = container_of(clone, struct dm_target_io, clone);
		tio->inside_dm_io = false;
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
L
Linus Torvalds 已提交
589 590
}

591
static void free_tio(struct dm_target_io *tio)
L
Linus Torvalds 已提交
592
{
593 594
	if (tio->inside_dm_io)
		return;
595
	bio_put(&tio->clone);
L
Linus Torvalds 已提交
596 597
}

598
int md_in_flight(struct mapped_device *md)
K
Kiyoshi Ueda 已提交
599 600 601 602 603
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

604 605 606
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
607
	struct bio *bio = io->orig_bio;
M
Mikulas Patocka 已提交
608
	int rw = bio_data_dir(bio);
609 610 611

	io->start_time = jiffies;

612 613
	generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
			      &dm_disk(md)->part0);
614

615
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
616
		   atomic_inc_return(&md->pending[rw]));
M
Mikulas Patocka 已提交
617 618

	if (unlikely(dm_stats_used(&md->stats)))
619 620 621
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    false, 0, &io->stats_aux);
622 623
}

624
static void end_io_acct(struct dm_io *io)
625 626
{
	struct mapped_device *md = io->md;
627
	struct bio *bio = io->orig_bio;
628
	unsigned long duration = jiffies - io->start_time;
629
	int pending;
630 631
	int rw = bio_data_dir(bio);

632 633
	generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
			    io->start_time);
634

M
Mikulas Patocka 已提交
635
	if (unlikely(dm_stats_used(&md->stats)))
636 637 638
		dm_stats_account_io(&md->stats, bio_data_dir(bio),
				    bio->bi_iter.bi_sector, bio_sectors(bio),
				    true, duration, &io->stats_aux);
M
Mikulas Patocka 已提交
639

640 641
	/*
	 * After this is decremented the bio must not be touched if it is
642
	 * a flush.
643
	 */
644 645
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
646
	pending += atomic_read(&md->pending[rw^0x1]);
647

648 649 650
	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
651 652
}

L
Linus Torvalds 已提交
653 654 655
/*
 * Add the bio to the list of deferred io.
 */
M
Mikulas Patocka 已提交
656
static void queue_io(struct mapped_device *md, struct bio *bio)
L
Linus Torvalds 已提交
657
{
658
	unsigned long flags;
L
Linus Torvalds 已提交
659

660
	spin_lock_irqsave(&md->deferred_lock, flags);
L
Linus Torvalds 已提交
661
	bio_list_add(&md->deferred, bio);
662
	spin_unlock_irqrestore(&md->deferred_lock, flags);
663
	queue_work(md->wq, &md->work);
L
Linus Torvalds 已提交
664 665 666 667 668
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
M
Mikulas Patocka 已提交
669
 * dm_put_live_table() when finished.
L
Linus Torvalds 已提交
670
 */
M
Mikulas Patocka 已提交
671
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
L
Linus Torvalds 已提交
672
{
M
Mikulas Patocka 已提交
673 674 675 676
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}
L
Linus Torvalds 已提交
677

M
Mikulas Patocka 已提交
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697
void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}
L
Linus Torvalds 已提交
698

M
Mikulas Patocka 已提交
699 700 701
static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
L
Linus Torvalds 已提交
702 703
}

704 705
static char *_dm_claim_ptr = "I belong to device-mapper";

706 707 708 709 710 711 712 713 714 715 716 717
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

718
	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
719 720 721 722 723 724 725 726 727 728
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
729
	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
730 731 732 733 734 735 736 737 738 739 740 741 742
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
743
	put_dax(td->dm_dev.dax_dev);
744
	td->dm_dev.bdev = NULL;
745
	td->dm_dev.dax_dev = NULL;
746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
767
		td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

784
		refcount_set(&td->count, 1);
785
		list_add(&td->list, &md->table_devices);
786 787
	} else {
		refcount_inc(&td->count);
788 789 790 791 792 793 794 795 796 797 798 799 800
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
801
	if (refcount_dec_and_test(&td->count)) {
802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
818
		       td->dm_dev.name, refcount_read(&td->count));
819 820 821 822
		kfree(td);
	}
}

D
Darrick J. Wong 已提交
823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

850 851 852 853 854
static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

L
Linus Torvalds 已提交
855 856 857 858
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
859
static void dec_pending(struct dm_io *io, blk_status_t error)
L
Linus Torvalds 已提交
860
{
861
	unsigned long flags;
862
	blk_status_t io_error;
863 864
	struct bio *bio;
	struct mapped_device *md = io->md;
865 866

	/* Push-back supersedes any I/O errors */
867 868
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
869
		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
870
			io->status = error;
871 872
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}
L
Linus Torvalds 已提交
873 874

	if (atomic_dec_and_test(&io->io_count)) {
875
		if (io->status == BLK_STS_DM_REQUEUE) {
876 877 878
			/*
			 * Target requested pushing back the I/O.
			 */
879
			spin_lock_irqsave(&md->deferred_lock, flags);
880
			if (__noflush_suspending(md))
881 882
				/* NOTE early return due to BLK_STS_DM_REQUEUE below */
				bio_list_add_head(&md->deferred, io->orig_bio);
883
			else
884
				/* noflush suspend was interrupted. */
885
				io->status = BLK_STS_IOERR;
886
			spin_unlock_irqrestore(&md->deferred_lock, flags);
887 888
		}

889
		io_error = io->status;
890
		bio = io->orig_bio;
891 892 893
		end_io_acct(io);
		free_io(md, io);

894
		if (io_error == BLK_STS_DM_REQUEUE)
895
			return;
896

J
Jens Axboe 已提交
897
		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
898
			/*
899
			 * Preflush done for flush with data, reissue
900
			 * without REQ_PREFLUSH.
901
			 */
J
Jens Axboe 已提交
902
			bio->bi_opf &= ~REQ_PREFLUSH;
903
			queue_io(md, bio);
904
		} else {
905
			/* done with normal IO or empty flush */
906 907
			if (io_error)
				bio->bi_status = io_error;
908
			bio_endio(bio);
909
		}
L
Linus Torvalds 已提交
910 911 912
	}
}

913
void disable_write_same(struct mapped_device *md)
914 915 916 917 918 919 920
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

921 922 923 924 925 926 927 928
void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

929
static void clone_endio(struct bio *bio)
L
Linus Torvalds 已提交
930
{
931
	blk_status_t error = bio->bi_status;
M
Mikulas Patocka 已提交
932
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
933
	struct dm_io *io = tio->io;
S
Stefan Bader 已提交
934
	struct mapped_device *md = tio->io->md;
L
Linus Torvalds 已提交
935 936
	dm_endio_fn endio = tio->ti->type->end_io;

937
	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
938
		if (bio_op(bio) == REQ_OP_WRITE_SAME &&
939
		    !bio->bi_disk->queue->limits.max_write_same_sectors)
940 941
			disable_write_same(md);
		if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
942
		    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
943 944
			disable_write_zeroes(md);
	}
945

946
	if (endio) {
947
		int r = endio(tio->ti, bio, &error);
948 949
		switch (r) {
		case DM_ENDIO_REQUEUE:
950
			error = BLK_STS_DM_REQUEUE;
951 952 953 954 955 956 957 958 959 960 961 962
			/*FALLTHRU*/
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

963
	free_tio(tio);
964
	dec_pending(io, error);
L
Linus Torvalds 已提交
965 966
}

967 968 969 970 971 972 973 974 975 976 977 978
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
L
Linus Torvalds 已提交
979
{
980
	sector_t len = max_io_len_target_boundary(sector, ti);
981
	sector_t offset, max_len;
L
Linus Torvalds 已提交
982 983

	/*
984
	 * Does the target need to split even further?
L
Linus Torvalds 已提交
985
	 */
986 987 988 989 990 991 992 993 994 995
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
L
Linus Torvalds 已提交
996 997 998 999 1000
	}

	return len;
}

1001 1002 1003 1004 1005 1006 1007 1008 1009
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

1010
	ti->max_io_len = (uint32_t) len;
1011 1012 1013 1014 1015

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

1016
static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1017 1018
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
1019 1020 1021 1022
{
	struct dm_table *map;
	struct dm_target *ti;

1023
	map = dm_get_live_table(md, srcu_idx);
1024
	if (!map)
1025
		return NULL;
1026 1027 1028

	ti = dm_table_find_target(map, sector);
	if (!dm_target_is_valid(ti))
1029
		return NULL;
1030

1031 1032
	return ti;
}
1033

1034
static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1035
				 long nr_pages, void **kaddr, pfn_t *pfn)
1036 1037 1038 1039 1040 1041
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;
1042

1043
	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1044

1045 1046 1047 1048 1049 1050 1051 1052
	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(sector, ti) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
1053
	ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1054

1055
 out:
1056
	dm_put_live_table(md, srcu_idx);
1057 1058

	return ret;
1059 1060
}

1061
static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
1062
				    void *addr, size_t bytes, struct iov_iter *i)
1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_from_iter) {
		ret = copy_from_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108
static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long ret = 0;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->dax_copy_to_iter) {
		ret = copy_to_iter(addr, bytes, i);
		goto out;
	}
	ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
 out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

1109 1110
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
1111
 * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
J
Jens Axboe 已提交
1141
	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1142 1143 1144 1145 1146 1147 1148
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

1149
/*
1150 1151 1152 1153 1154 1155 1156 1157
 * The zone descriptors obtained with a zone report indicate zone positions
 * within the target backing device, regardless of that device is a partition
 * and regardless of the target mapping start sector on the device or partition.
 * The zone descriptors start sector and write pointer position must be adjusted
 * to match their relative position within the dm device.
 * A target may call dm_remap_zone_report() after completion of a
 * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
 * backing device.
1158 1159 1160 1161 1162
 */
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1163
	struct bio *report_bio = tio->io->orig_bio;
1164 1165 1166 1167
	struct blk_zone_report_hdr *hdr = NULL;
	struct blk_zone *zone;
	unsigned int nr_rep = 0;
	unsigned int ofst;
1168
	sector_t part_offset;
1169 1170 1171 1172 1173 1174 1175
	struct bio_vec bvec;
	struct bvec_iter iter;
	void *addr;

	if (bio->bi_status)
		return;

1176 1177 1178 1179 1180 1181 1182 1183 1184
	/*
	 * bio sector was incremented by the request size on completion. Taking
	 * into account the original request sector, the target start offset on
	 * the backing device and the target mapping offset (ti->begin), the
	 * start sector of the backing device. The partition offset is always 0
	 * if the target uses a whole device.
	 */
	part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));

1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201
	/*
	 * Remap the start sector of the reported zones. For sequential zones,
	 * also remap the write pointer position.
	 */
	bio_for_each_segment(bvec, report_bio, iter) {
		addr = kmap_atomic(bvec.bv_page);

		/* Remember the report header in the first page */
		if (!hdr) {
			hdr = addr;
			ofst = sizeof(struct blk_zone_report_hdr);
		} else
			ofst = 0;

		/* Set zones start sector */
		while (hdr->nr_zones && ofst < bvec.bv_len) {
			zone = addr + ofst;
1202
			zone->start -= part_offset;
1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
			if (zone->start >= start + ti->len) {
				hdr->nr_zones = 0;
				break;
			}
			zone->start = zone->start + ti->begin - start;
			if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
				if (zone->cond == BLK_ZONE_COND_FULL)
					zone->wp = zone->start + zone->len;
				else if (zone->cond == BLK_ZONE_COND_EMPTY)
					zone->wp = zone->start;
				else
1214
					zone->wp = zone->wp + ti->begin - start - part_offset;
1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240
			}
			ofst += sizeof(struct blk_zone);
			hdr->nr_zones--;
			nr_rep++;
		}

		if (addr != hdr)
			kunmap_atomic(addr);

		if (!hdr->nr_zones)
			break;
	}

	if (hdr) {
		hdr->nr_zones = nr_rep;
		kunmap_atomic(hdr);
	}

	bio_advance(report_bio, report_bio->bi_iter.bi_size);

#else /* !CONFIG_BLK_DEV_ZONED */
	bio->bi_status = BLK_STS_NOTSUPP;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);

1241
static blk_qc_t __map_bio(struct dm_target_io *tio)
L
Linus Torvalds 已提交
1242 1243
{
	int r;
1244
	sector_t sector;
1245
	struct bio *clone = &tio->clone;
1246
	struct dm_io *io = tio->io;
1247
	struct mapped_device *md = io->md;
A
Alasdair G Kergon 已提交
1248
	struct dm_target *ti = tio->ti;
1249
	blk_qc_t ret = BLK_QC_T_NONE;
L
Linus Torvalds 已提交
1250 1251 1252 1253 1254 1255 1256 1257

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
1258
	atomic_inc(&io->io_count);
1259
	sector = clone->bi_iter.bi_sector;
1260

M
Mikulas Patocka 已提交
1261
	r = ti->type->map(ti, clone);
1262 1263 1264 1265
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		break;
	case DM_MAPIO_REMAPPED:
L
Linus Torvalds 已提交
1266
		/* the bio has been remapped so dispatch it */
1267
		trace_block_bio_remap(clone->bi_disk->queue, clone,
1268
				      bio_dev(io->orig_bio), sector);
1269 1270 1271 1272
		if (md->type == DM_TYPE_NVME_BIO_BASED)
			ret = direct_make_request(clone);
		else
			ret = generic_make_request(clone);
1273 1274
		break;
	case DM_MAPIO_KILL:
1275
		free_tio(tio);
1276
		dec_pending(io, BLK_STS_IOERR);
1277
		break;
1278
	case DM_MAPIO_REQUEUE:
1279
		free_tio(tio);
1280
		dec_pending(io, BLK_STS_DM_REQUEUE);
1281 1282
		break;
	default:
1283 1284
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
L
Linus Torvalds 已提交
1285 1286
	}

1287
	return ret;
L
Linus Torvalds 已提交
1288 1289
}

1290
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
A
Alasdair G Kergon 已提交
1291
{
1292 1293
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
L
Linus Torvalds 已提交
1294 1295 1296 1297 1298
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
1299 1300
static int clone_bio(struct dm_target_io *tio, struct bio *bio,
		     sector_t sector, unsigned len)
L
Linus Torvalds 已提交
1301
{
1302
	struct bio *clone = &tio->clone;
L
Linus Torvalds 已提交
1303

1304 1305
	__bio_clone_fast(clone, bio);

1306
	if (bio_integrity(bio)) {
1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
		int r;

		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
			     !dm_target_passes_integrity(tio->ti->type))) {
			DMWARN("%s: the target %s doesn't support integrity data.",
				dm_device_name(tio->io->md),
				tio->ti->type->name);
			return -EIO;
		}

		r = bio_integrity_clone(clone, bio, GFP_NOIO);
1318 1319 1320
		if (r < 0)
			return r;
	}
A
Alasdair G Kergon 已提交
1321

1322
	bio_trim(clone, sector - clone->bi_iter.bi_sector, len);
1323 1324

	return 0;
L
Linus Torvalds 已提交
1325 1326
}

1327 1328
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned num_bios)
1329
{
1330
	struct dm_target_io *tio;
1331
	int try;
1332

1333 1334
	if (!num_bios)
		return;
1335

1336 1337 1338 1339 1340
	if (num_bios == 1) {
		tio = alloc_tio(ci, ti, 0, GFP_NOIO);
		bio_list_add(blist, &tio->clone);
		return;
	}
1341

1342 1343 1344 1345 1346
	for (try = 0; try < 2; try++) {
		int bio_nr;
		struct bio *bio;

		if (try)
1347
			mutex_lock(&ci->io->md->table_devices_lock);
1348 1349 1350 1351 1352 1353 1354 1355
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
			if (!tio)
				break;

			bio_list_add(blist, &tio->clone);
		}
		if (try)
1356
			mutex_unlock(&ci->io->md->table_devices_lock);
1357 1358 1359 1360 1361 1362 1363 1364
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist))) {
			tio = container_of(bio, struct dm_target_io, clone);
			free_tio(tio);
		}
	}
1365 1366
}

1367 1368
static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
					   struct dm_target_io *tio, unsigned *len)
1369
{
1370
	struct bio *clone = &tio->clone;
1371

1372 1373
	tio->len_ptr = len;

1374
	__bio_clone_fast(clone, ci->bio);
A
Alasdair G Kergon 已提交
1375
	if (len)
1376
		bio_setup_sector(clone, ci->sector, *len);
1377

1378
	return __map_bio(tio);
1379 1380
}

1381
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1382
				  unsigned num_bios, unsigned *len)
1383
{
1384 1385 1386 1387 1388
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *bio;
	struct dm_target_io *tio;

	alloc_multiple_bios(&blist, ci, ti, num_bios);
1389

1390 1391
	while ((bio = bio_list_pop(&blist))) {
		tio = container_of(bio, struct dm_target_io, clone);
1392
		(void) __clone_and_map_simple_bio(ci, tio, len);
1393
	}
1394 1395
}

1396
static int __send_empty_flush(struct clone_info *ci)
1397
{
1398
	unsigned target_nr = 0;
1399 1400
	struct dm_target *ti;

1401
	BUG_ON(bio_has_data(ci->bio));
1402
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
1403
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
1404 1405 1406 1407

	return 0;
}

1408
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1409
				    sector_t sector, unsigned *len)
M
Mike Snitzer 已提交
1410
{
1411
	struct bio *bio = ci->bio;
M
Mike Snitzer 已提交
1412
	struct dm_target_io *tio;
1413
	int r;
M
Mike Snitzer 已提交
1414

1415
	tio = alloc_tio(ci, ti, 0, GFP_NOIO);
1416 1417 1418 1419 1420
	tio->len_ptr = len;
	r = clone_bio(tio, bio, sector, *len);
	if (r < 0) {
		free_tio(tio);
		return r;
1421
	}
1422
	(void) __map_bio(tio);
1423

1424
	return 0;
M
Mike Snitzer 已提交
1425 1426
}

1427
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
M
Mike Snitzer 已提交
1428

1429
static unsigned get_num_discard_bios(struct dm_target *ti)
M
Mike Snitzer 已提交
1430
{
1431
	return ti->num_discard_bios;
M
Mike Snitzer 已提交
1432 1433
}

1434 1435 1436 1437 1438
static unsigned get_num_secure_erase_bios(struct dm_target *ti)
{
	return ti->num_secure_erase_bios;
}

1439
static unsigned get_num_write_same_bios(struct dm_target *ti)
M
Mike Snitzer 已提交
1440
{
1441
	return ti->num_write_same_bios;
M
Mike Snitzer 已提交
1442 1443
}

1444 1445 1446 1447 1448
static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
{
	return ti->num_write_zeroes_bios;
}

M
Mike Snitzer 已提交
1449
typedef bool (*is_split_required_fn)(struct dm_target *ti);
1450

M
Mike Snitzer 已提交
1451 1452
static bool is_split_required_for_discard(struct dm_target *ti)
{
1453
	return ti->split_discard_bios;
1454 1455
}

1456
static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
1457 1458
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
1459
{
1460
	unsigned len;
1461
	unsigned num_bios;
1462

1463 1464 1465 1466 1467 1468 1469 1470 1471
	/*
	 * Even though the device advertised support for this type of
	 * request, that does not mean every target supports it, and
	 * reconfiguration might also have changed that since the
	 * check was performed.
	 */
	num_bios = get_num_bios ? get_num_bios(ti) : 0;
	if (!num_bios)
		return -EOPNOTSUPP;
1472

1473 1474 1475 1476
	if (is_split_required && !is_split_required(ti))
		len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
	else
		len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
1477

1478
	__send_duplicate_bios(ci, ti, num_bios, &len);
1479

1480 1481
	ci->sector += len;
	ci->sector_count -= len;
M
Mike Snitzer 已提交
1482 1483

	return 0;
1484 1485
}

1486
static int __send_discard(struct clone_info *ci, struct dm_target *ti)
M
Mike Snitzer 已提交
1487
{
1488
	return __send_changing_extent_only(ci, ti, get_num_discard_bios,
1489
					   is_split_required_for_discard);
M
Mike Snitzer 已提交
1490
}
1491

1492 1493 1494 1495 1496
static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
{
	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios, NULL);
}

1497
static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
1498
{
1499
	return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
1500 1501
}

1502
static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
1503
{
1504
	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
1505 1506
}

1507 1508 1509 1510 1511 1512 1513
static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
				  int *result)
{
	struct bio *bio = ci->bio;

	if (bio_op(bio) == REQ_OP_DISCARD)
		*result = __send_discard(ci, ti);
1514 1515
	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
		*result = __send_secure_erase(ci, ti);
1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
		*result = __send_write_same(ci, ti);
	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
		*result = __send_write_zeroes(ci, ti);
	else
		return false;

	return true;
}

A
Alasdair G Kergon 已提交
1526 1527 1528
/*
 * Select the correct strategy for processing a non-flush bio.
 */
1529
static int __split_and_process_non_flush(struct clone_info *ci)
1530
{
1531
	struct bio *bio = ci->bio;
1532
	struct dm_target *ti;
1533
	unsigned len;
1534
	int r;
1535

1536 1537 1538 1539
	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

1540 1541
	if (unlikely(__process_abnormal_io(ci, ti, &r)))
		return r;
1542

1543 1544 1545 1546 1547
	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
		len = ci->sector_count;
	else
		len = min_t(sector_t, max_io_len(ci->sector, ti),
			    ci->sector_count);
1548

1549 1550 1551
	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
	if (r < 0)
		return r;
1552

1553 1554
	ci->sector += len;
	ci->sector_count -= len;
1555

1556
	return 0;
1557 1558
}

1559 1560 1561 1562 1563 1564 1565 1566
static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
			    struct dm_table *map, struct bio *bio)
{
	ci->map = map;
	ci->io = alloc_io(md, bio);
	ci->sector = bio->bi_iter.bi_sector;
}

1567 1568 1569
#define __dm_part_stat_sub(part, field, subnd)	\
	(part_stat_get(part, field) -= (subnd))

L
Linus Torvalds 已提交
1570
/*
1571
 * Entry point to split a bio into clones and submit them to the targets.
L
Linus Torvalds 已提交
1572
 */
1573 1574
static blk_qc_t __split_and_process_bio(struct mapped_device *md,
					struct dm_table *map, struct bio *bio)
1575
{
L
Linus Torvalds 已提交
1576
	struct clone_info ci;
1577
	blk_qc_t ret = BLK_QC_T_NONE;
1578
	int error = 0;
L
Linus Torvalds 已提交
1579

M
Mikulas Patocka 已提交
1580
	if (unlikely(!map)) {
1581
		bio_io_error(bio);
1582
		return ret;
1583
	}
1584

1585 1586
	blk_queue_split(md->queue, &bio);

1587
	init_clone_info(&ci, md, map, bio);
1588

J
Jens Axboe 已提交
1589
	if (bio->bi_opf & REQ_PREFLUSH) {
1590
		ci.bio = &ci.io->md->flush_bio;
1591
		ci.sector_count = 0;
1592
		error = __send_empty_flush(&ci);
1593
		/* dec_pending submits any data associated with flush */
1594 1595 1596 1597
	} else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
		ci.bio = bio;
		ci.sector_count = 0;
		error = __split_and_process_non_flush(&ci);
1598
	} else {
1599
		ci.bio = bio;
1600
		ci.sector_count = bio_sectors(bio);
1601
		while (ci.sector_count && !error) {
1602
			error = __split_and_process_non_flush(&ci);
1603 1604 1605 1606 1607 1608
			if (current->bio_list && ci.sector_count && !error) {
				/*
				 * Remainder must be passed to generic_make_request()
				 * so that it gets handled *after* bios already submitted
				 * have been completely processed.
				 * We take a clone of the original to store in
1609
				 * ci.io->orig_bio to be used by end_io_acct() and
1610 1611
				 * for dec_pending to use for completion handling.
				 * As this path is not used for REQ_OP_ZONE_REPORT,
1612
				 * the usage of io->orig_bio in dm_remap_zone_report()
1613 1614
				 * won't be affected by this reassignment.
				 */
1615 1616
				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
							  GFP_NOIO, &md->queue->bio_split);
1617
				ci.io->orig_bio = b;
1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630

				/*
				 * Adjust IO stats for each split, otherwise upon queue
				 * reentry there will be redundant IO accounting.
				 * NOTE: this is a stop-gap fix, a proper fix involves
				 * significant refactoring of DM core's bio splitting
				 * (by eliminating DM's splitting and just using bio_split)
				 */
				part_stat_lock();
				__dm_part_stat_sub(&dm_disk(md)->part0,
						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
				part_stat_unlock();

1631
				bio_chain(b, bio);
1632
				ret = generic_make_request(bio);
1633 1634 1635
				break;
			}
		}
1636
	}
1637

L
Linus Torvalds 已提交
1638
	/* drop the extra reference count */
1639
	dec_pending(ci.io, errno_to_blk_status(error));
1640
	return ret;
1641 1642
}

1643
/*
1644 1645
 * Optimized variant of __split_and_process_bio that leverages the
 * fact that targets that use it do _not_ have a need to split bios.
1646
 */
1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680
static blk_qc_t __process_bio(struct mapped_device *md,
			      struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	blk_qc_t ret = BLK_QC_T_NONE;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return ret;
	}

	init_clone_info(&ci, md, map, bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		ci.bio = &ci.io->md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		struct dm_target *ti = md->immutable_target;
		struct dm_target_io *tio;

		/*
		 * Defend against IO still getting in during teardown
		 * - as was seen for a time with nvme-fcloop
		 */
		if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
			error = -EIO;
			goto out;
		}

		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
1681 1682 1683 1684
		if (unlikely(__process_abnormal_io(&ci, ti, &error)))
			goto out;

		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
1685 1686 1687 1688 1689 1690 1691 1692
		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
	}
out:
	/* drop the extra reference count */
	dec_pending(ci.io, errno_to_blk_status(error));
	return ret;
}

1693 1694 1695 1696 1697 1698 1699 1700 1701
static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
{
	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		return __process_bio(md, map, bio);
	else
		return __split_and_process_bio(md, map, bio);
}

1702
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
1703 1704
{
	struct mapped_device *md = q->queuedata;
1705
	blk_qc_t ret = BLK_QC_T_NONE;
M
Mikulas Patocka 已提交
1706 1707
	int srcu_idx;
	struct dm_table *map;
1708

M
Mikulas Patocka 已提交
1709
	map = dm_get_live_table(md, &srcu_idx);
1710

1711 1712
	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
M
Mikulas Patocka 已提交
1713
		dm_put_live_table(md, srcu_idx);
1714

J
Jens Axboe 已提交
1715
		if (!(bio->bi_opf & REQ_RAHEAD))
1716 1717
			queue_io(md, bio);
		else
A
Alasdair G Kergon 已提交
1718
			bio_io_error(bio);
1719
		return ret;
1720
	}
L
Linus Torvalds 已提交
1721

1722
	ret = dm_process_bio(md, map, bio);
1723

M
Mikulas Patocka 已提交
1724
	dm_put_live_table(md, srcu_idx);
1725 1726 1727
	return ret;
}

L
Linus Torvalds 已提交
1728 1729
static int dm_any_congested(void *congested_data, int bdi_bits)
{
1730 1731 1732
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;
L
Linus Torvalds 已提交
1733

1734
	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
M
Mike Snitzer 已提交
1735
		if (dm_request_based(md)) {
1736
			/*
M
Mike Snitzer 已提交
1737 1738
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
1739
			 */
1740
			r = md->queue->backing_dev_info->wb.state & bdi_bits;
M
Mike Snitzer 已提交
1741 1742 1743
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
1744
				r = dm_table_any_congested(map, bdi_bits);
M
Mike Snitzer 已提交
1745
			dm_put_live_table_fast(md);
1746 1747 1748
		}
	}

L
Linus Torvalds 已提交
1749 1750 1751 1752 1753 1754
	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
1755
static void free_minor(int minor)
L
Linus Torvalds 已提交
1756
{
1757
	spin_lock(&_minor_lock);
L
Linus Torvalds 已提交
1758
	idr_remove(&_minor_idr, minor);
1759
	spin_unlock(&_minor_lock);
L
Linus Torvalds 已提交
1760 1761 1762 1763 1764
}

/*
 * See if the device with a specific minor # is free.
 */
1765
static int specific_minor(int minor)
L
Linus Torvalds 已提交
1766
{
T
Tejun Heo 已提交
1767
	int r;
L
Linus Torvalds 已提交
1768 1769 1770 1771

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

T
Tejun Heo 已提交
1772
	idr_preload(GFP_KERNEL);
1773
	spin_lock(&_minor_lock);
L
Linus Torvalds 已提交
1774

T
Tejun Heo 已提交
1775
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
L
Linus Torvalds 已提交
1776

1777
	spin_unlock(&_minor_lock);
T
Tejun Heo 已提交
1778 1779 1780 1781
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
L
Linus Torvalds 已提交
1782 1783
}

1784
static int next_free_minor(int *minor)
L
Linus Torvalds 已提交
1785
{
T
Tejun Heo 已提交
1786
	int r;
J
Jeff Mahoney 已提交
1787

T
Tejun Heo 已提交
1788
	idr_preload(GFP_KERNEL);
1789
	spin_lock(&_minor_lock);
L
Linus Torvalds 已提交
1790

T
Tejun Heo 已提交
1791
	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
L
Linus Torvalds 已提交
1792

1793
	spin_unlock(&_minor_lock);
T
Tejun Heo 已提交
1794 1795 1796 1797 1798
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
L
Linus Torvalds 已提交
1799 1800
}

1801
static const struct block_device_operations dm_blk_dops;
1802
static const struct dax_operations dm_dax_ops;
L
Linus Torvalds 已提交
1803

1804 1805
static void dm_wq_work(struct work_struct *work);

1806
static void dm_init_normal_md_queue(struct mapped_device *md)
1807
{
1808
	md->use_blk_mq = false;
1809 1810 1811 1812

	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
1813
	md->queue->backing_dev_info->congested_fn = dm_any_congested;
1814 1815
}

1816 1817 1818 1819 1820 1821
static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	if (md->kworker_task)
		kthread_stop(md->kworker_task);
1822 1823
	bioset_exit(&md->bs);
	bioset_exit(&md->io_bs);
1824

1825 1826 1827 1828 1829 1830
	if (md->dax_dev) {
		kill_dax(md->dax_dev);
		put_dax(md->dax_dev);
		md->dax_dev = NULL;
	}

1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841
	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

1842 1843
	cleanup_srcu_struct(&md->io_barrier);

1844 1845 1846 1847
	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}
1848

1849 1850 1851 1852
	mutex_destroy(&md->suspend_lock);
	mutex_destroy(&md->type_lock);
	mutex_destroy(&md->table_devices_lock);

1853
	dm_mq_cleanup_mapped_device(md);
1854 1855
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r, numa_node_id = dm_get_numa_node();
	struct dax_device *dax_dev = NULL;
	struct mapped_device *md;
	void *old_md;

	md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->numa_node_id = numa_node_id;
	md->use_blk_mq = dm_use_blk_mq_default();
	md->init_tio_pdu = false;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
	if (!md->queue)
		goto bad;
	md->queue->queuedata = md;
	md->queue->backing_dev_info->congested_data = md;

	md->disk = alloc_disk_node(1, md->numa_node_id);
	if (!md->disk)
		goto bad;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);
	md->kworker_task = NULL;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
		dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
		if (!dax_dev)
			goto bad;
	}
	md->dax_dev = dax_dev;

	add_disk_no_queue_reg(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	bio_init(&md->flush_bio, NULL, 0);
	bio_set_dev(&md->flush_bio, md->bdev);
	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kvfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);

	cleanup_mapped_device(md);

	free_table_devices(&md->table_devices);
	dm_stats_cleanup(&md->stats);
	free_minor(minor);

	module_put(THIS_MODULE);
	kvfree(md);
}

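/*
 * Set up md's bio sets from the mempools that were allocated for the
 * table being bound, replacing any existing bio-based bio sets whose
 * front padding may no longer match the new table.
 */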
static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
		 * The md may already have mempools that need changing.
		 * If so, reload bioset because front_pad may have changed
		 * because a different table was loaded.
		 */
		bioset_exit(&md->bs);
		bioset_exit(&md->io_bs);

	} else if (bioset_initialized(&md->bs)) {
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

	ret = bioset_init_from_src(&md->bs, &p->bs);
	if (ret)
		goto out;
	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
	if (ret)
		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
	return ret;
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
	dm_issue_global_event();
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	lockdep_assert_held(&md->suspend_lock);

	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
	int ret;

	lockdep_assert_held(&md->suspend_lock);

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (request_based)
		dm_stop_queue(q);

	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
		/*
		 * Leverage the fact that request-based DM targets and
		 * NVMe bio based targets are immutable singletons
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
		 *   and __process_bio.
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	ret = __bind_mempools(md, t);
	if (ret) {
		old_map = ERR_PTR(ret);
		goto out;
	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

out:
	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	int r;
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	r = dm_sysfs_init(md);
	if (r) {
		free_dev(md);
		return r;
	}

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
	int r;
	struct queue_limits limits;
	enum dm_queue_mode type = dm_get_md_type(md);

	switch (type) {
	case DM_TYPE_REQUEST_BASED:
		dm_init_normal_md_queue(md);
		r = dm_old_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_mq_init_request_queue(md, t);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		break;
	case DM_TYPE_NONE:
		WARN_ON_ONCE(true);
		break;
	}

	r = dm_calculate_queue_limits(t, &limits);
	if (r) {
		DMERR("Cannot calculate initial queue limits");
		return r;
	}
	dm_table_set_restrictions(t, md->queue, &limits);
	blk_register_queue(md->disk);

	return 0;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
	    test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

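/*
 * Common teardown for dm_destroy() and dm_destroy_immediate(): mark the
 * device DMF_FREEING, suspend the targets if they are still live, then
 * (optionally) wait for the remaining holders before freeing everything.
 */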
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	blk_set_queue_dying(md->queue);

	if (dm_request_based(md) && md->kworker_task)
		kthread_flush_worker(&md->kworker);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

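/*
 * Wait until no I/O is in flight against this device, or until a signal
 * is received when task_state allows interruption.
 */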
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
	int r = 0;
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&md->wait, &wait, task_state);

		if (!md_in_flight(md))
			break;

		if (signal_pending_state(task_state, current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	finish_wait(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			(void) generic_make_request(c);
		else
			(void) dm_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

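/*
 * Allow bio submission again and kick dm_wq_work() so that bios deferred
 * while the device was blocked for suspend get reissued.
 */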
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);
	dm_issue_global_event();

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
 *
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to the md->deferred list.
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, long task_state,
			int dmf_suspended_flag)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	lockdep_assert_held(&md->suspend_lock);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	else
		pr_debug("%s: suspending with flush\n", dm_device_name(md));

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		dm_stop_queue(md->queue);
		if (md->kworker_task)
			kthread_flush_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, task_state);
	if (!r)
		set_bit(dmf_suspended_flag, &md->flags);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
	if (r)
		goto out_unlock;

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

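/*
 * Shared resume path for dm_resume() and internal resume: resume the
 * targets (when a map is supplied), restart deferred bio processing, and
 * for request-based devices restart the queue before thawing the fs.
 */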
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}

int dm_resume(struct mapped_device *md)
{
	int r;
	struct dm_table *map = NULL;

retry:
	r = -EINVAL;
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	lockdep_assert_held(&md->suspend_lock);

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
			    DMF_SUSPENDED_INTERNALLY);

	dm_table_postsuspend_targets(map);
}

static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}

void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);

/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);

void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);

	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

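/*
 * Allocate the bio sets a table of the given type will need.  For
 * bio-based tables the front padding is sized from per_io_data_size so
 * per-bio target data fits in the clone; request-based tables only need
 * the small dm_rq_clone_bio_info padding.
 */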
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_io_data_size,
					    unsigned min_pool_size)
{
	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
	unsigned int pool_size = 0;
	unsigned int front_pad, io_front_pad;
	int ret;

	if (!pools)
		return NULL;

	switch (type) {
	case DM_TYPE_BIO_BASED:
	case DM_TYPE_DAX_BIO_BASED:
	case DM_TYPE_NVME_BIO_BASED:
		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
		ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0);
		if (ret)
			goto out;
		if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
			goto out;
		break;
	case DM_TYPE_REQUEST_BASED:
	case DM_TYPE_MQ_REQUEST_BASED:
		pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is used for blk-mq pdu at queue allocation */
		break;
	default:
		BUG();
	}

	ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
	if (ret)
		goto out;

	if (integrity && bioset_integrity_create(&pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	bioset_exit(&pools->bs);
	bioset_exit(&pools->io_bs);

	kfree(pools);
}

struct dm_pr {
	u64	old_key;
	u64	new_key;
	u32	flags;
	bool	fail_early;
};

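/*
 * Persistent-reservation operations are only supported on devices whose
 * table has exactly one target; invoke @fn on that target's underlying
 * device(s) via .iterate_devices.
 */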
static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
		      void *data)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *table;
	struct dm_target *ti;
	int ret = -ENOTTY, srcu_idx;

	table = dm_get_live_table(md, &srcu_idx);
	if (!table || !dm_table_get_size(table))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(table) != 1)
		goto out;
	ti = dm_table_get_target(table, 0);

	ret = -EINVAL;
	if (!ti->type->iterate_devices)
		goto out;

	ret = ti->type->iterate_devices(ti, fn, data);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * For register / unregister we need to manually call out to every path.
 */
static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;
	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
}

static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct dm_pr pr = {
		.old_key	= old_key,
		.new_key	= new_key,
		.flags		= flags,
		.fail_early	= true,
	};
	int ret;

	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
	if (ret && new_key) {
		/* unregister all paths if we failed to register any path */
		pr.old_key = new_key;
		pr.new_key = 0;
		pr.flags = 0;
		pr.fail_early = false;
		dm_call_pr(bdev, __dm_pr_register, &pr);
	}

	return ret;
}

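/*
 * The remaining PR commands are passed straight through to the block
 * device that dm_prepare_ioctl() resolves for this mapped device.
 */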
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};

static const struct dax_operations dm_dax_ops = {
	.direct_access = dm_dax_direct_access,
	.copy_from_iter = dm_dax_copy_from_iter,
	.copy_to_iter = dm_dax_copy_to_iter,
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");