/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	struct mapped_device *md;
	unsigned type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	bool all_blk_mq:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

	struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

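/*
 * For example, int_log(1000, 64) iterates n = 1000 -> 16 -> 1 and
 * returns 2: two index levels are enough to cover 1000 leaf nodes
 * with a fan-out of 64.
 */
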
/*
 * Calculate the index of the child node of the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vzalloc(size);

	return addr;
}
EXPORT_SYMBOL(dm_vcalloc);

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
	 */
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	memset(n_highs, -1, sizeof(*n_highs) * num);
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

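/*
 * The block allocated by alloc_targets() thus holds num sector_t "highs"
 * entries (initialized to -1) immediately followed by the dm_target
 * array at n_highs + num, with room for one spare entry of each to
 * catch sectors beyond the end of the device.
 */
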
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	INIT_LIST_HEAD(&t->target_callbacks);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->type = DM_TYPE_NONE;
	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
		kfree(dd);
	}
}

void dm_table_destroy(struct dm_table *t)
{
	unsigned int i;

	if (!t)
		return;

	/* free the indexes */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	free_devices(&t->devices, t->md);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, l, list)
		if (dd->dm_dev->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q;
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	unsigned short logical_block_size_sectors =
		limits->logical_block_size >> SECTOR_SHIFT;
	char b[BDEVNAME_SIZE];

	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (!dev_size)
		return 0;

	if ((start >= dev_size) || (start + len > dev_size)) {
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

	if (logical_block_size_sectors <= 1)
		return 0;

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	if (len & (logical_block_size_sectors - 1)) {
		DMWARN("%s: len=%llu not aligned to h/w "
		       "logical block size %u of %s",
		       dm_device_name(ti->table->md),
		       (unsigned long long)len,
		       limits->logical_block_size, bdevname(bdev, b));
		return 1;
	}

	return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
{
	int r;
	struct dm_dev *old_dev, *new_dev;

	old_dev = dd->dm_dev;

	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
	if (r)
		return r;

	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);

	return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
	dev_t uninitialized_var(dev);
	struct block_device *bdev;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		dev = name_to_dev_t(path);
	else {
		dev = bdev->bd_dev;
		bdput(bdev);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

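/*
 * Example: both "/dev/sdb1" and "8:17" resolve here; lookup_bdev()
 * handles the path form and name_to_dev_t() falls back to forms such
 * as "major:minor".
 */
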
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev_internal *dd;
	struct dm_table *t = ti->table;

	BUG_ON(!t);

	dev = dm_get_dev_t(path);
	if (!dev)
		return -ENODEV;

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
		if (r) {
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	*result = dd->dm_dev;
	return 0;
}
EXPORT_SYMBOL(dm_get_device);

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;
	}

	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
		       (unsigned long long) start << SECTOR_SHIFT);

	return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
	if (atomic_dec_and_test(&dd->count)) {
		dm_put_table_device(ti->table->md, d);
		list_del(&dd->list);
		kfree(dd);
	}
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
	gfp_t gfp;

	if (*array_size) {
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc(new_size * sizeof(*argv), gfp);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;

	if (!input) {
		*argvp = NULL;
		return 0;
	}

	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
		start = skip_spaces(end);

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

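/*
 * Example: dm_split_args() on the (writable) parameter string
 * "2 64 /dev/sda 0 /dev/sdb 0" yields argc == 6 with argv[0] == "2"
 * and argv[2] == "/dev/sda".  A backslash quotes the next character,
 * so "a\ b" is parsed as the single token "a b".
 */
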
/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						 struct queue_limits *limits)
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
		limits->logical_block_size >> SECTOR_SHIFT;

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		blk_set_stacking_limits(&ti_limits);

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

		/*
		 * If the remaining sectors fall entirely within this
		 * table entry, are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
		    remaining & ((ti_limits.logical_block_size >>
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
		       "not aligned to h/w logical block size %u",
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
		       limits->logical_block_size);
		return -EINVAL;
	}

	return 0;
}

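/*
 * Example: with 4096-byte logical blocks (8 sectors), a first target of
 * 1001 sectors leaves remaining == 7 sectors of the straddling block for
 * the next target.  If that target also requires 4096-byte blocks,
 * 7 & 7 != 0 and the table is rejected; target lengths that are
 * multiples of 8 sectors always pass.
 */
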
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

	BUG_ON(t->num_targets >= t->num_allocated);

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: target type %s must appear alone in table",
			      dm_device_name(t->md), type);
			return -EINVAL;
		}
		t->singleton = true;
	}

	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		DMERR("%s: target type %s may not be included in read-only tables",
		      dm_device_name(t->md), type);
		return -EINVAL;
	}

	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), t->immutable_target_type->name);
			return -EINVAL;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), tgt->type->name);
			return -EINVAL;
		}
		t->immutable_target_type = tgt->type;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
		       dm_device_name(t->md), type);

	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
779
	char dummy;
780 781

	if (!arg_str ||
782
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

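/*
 * Typical use of the argument helpers above in a target constructor
 * (a sketch; the feature-arg bounds and handle() are hypothetical):
 *
 *	static struct dm_arg _args[] = {
 *		{0, 3, "Invalid number of feature arguments"},
 *	};
 *	unsigned num_features;
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *
 *	if (dm_read_arg_group(_args, &as, &num_features, &ti->error))
 *		return -EINVAL;
 *	while (num_features--)
 *		handle(dm_shift_arg(&as));
 */
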
static bool __table_type_request_based(unsigned table_type)
{
	return (table_type == DM_TYPE_REQUEST_BASED ||
		table_type == DM_TYPE_MQ_REQUEST_BASED);
}

void dm_table_set_type(struct dm_table *t, unsigned type)
{
	t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

static int dm_table_determine_type(struct dm_table *t)
{
	unsigned i;
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
	bool verify_blk_mq = false;
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);
	unsigned live_md_type = dm_get_md_type(t->md);

	if (t->type != DM_TYPE_NONE) {
		/* target already set the table's type */
		if (t->type == DM_TYPE_BIO_BASED)
			return 0;
		goto verify_rq_based;
	}

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
		if (__table_type_request_based(live_md_type))
			request_based = 1;
		else
			bio_based = 1;
	}

	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

	if (list_empty(devices) && __table_type_request_based(live_md_type)) {
		/* inherit live MD type */
		t->type = live_md_type;
		return 0;
	}

	/*
	 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
	 * having a compatible target use dm_table_set_type.
	 */
	t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

	/* Non-request-stackable devices can't be used for request-based dm */
	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

		if (!blk_queue_stackable(q)) {
			DMERR("table load rejected: including"
			      " non-request-stackable devices");
			return -EINVAL;
		}

		if (q->mq_ops)
			verify_blk_mq = true;
	}

	if (verify_blk_mq) {
		/* verify _all_ devices in the table are blk-mq devices */
		list_for_each_entry(dd, devices, list)
			if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
				DMERR("table load rejected: not all devices"
				      " are blk-mq request-stackable");
				return -EINVAL;
			}

		t->all_blk_mq = true;
	}

	return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
	/* Immutable target is implicitly a singleton */
	if (t->num_targets > 1 ||
	    !dm_target_is_immutable(t->targets[0].type))
		return NULL;

	return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);
		if (dm_target_is_wildcard(ti->type))
			return ti;
	}

	return NULL;
}

bool dm_table_request_based(struct dm_table *t)
{
	return __table_type_request_based(dm_table_get_type(t));
}

bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
	return t->all_blk_mq;
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
	unsigned type = dm_table_get_type(t);
	unsigned per_io_data_size = 0;
	struct dm_target *tgt;
	unsigned i;

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

	if (type == DM_TYPE_BIO_BASED)
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
		}

	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

static bool integrity_profile_exists(struct gendisk *disk)
{
	return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	list_for_each_entry(dd, devices, list) {
		template_disk = dd->dm_dev->bdev->bd_disk;
		if (!integrity_profile_exists(template_disk))
			goto no_integrity;
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a two-stage integrity
 * profile validation: a first pass during table load, a final pass during
 * resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
	struct mapped_device *md = t->md;
	struct gendisk *template_disk = NULL;

	template_disk = dm_table_get_integrity_disk(t);
	if (!template_disk)
		return 0;

	if (!integrity_profile_exists(dm_disk(md))) {
		t->integrity_supported = true;
		/*
		 * Register integrity profile during table load; we can do
		 * this because the final profile must match during resume.
		 */
		blk_integrity_register(dm_disk(md),
				       blk_get_integrity(template_disk));
		return 0;
	}

	/*
	 * If DM device already has an initialized integrity
	 * profile, the new profile should not conflict.
	 */
	if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing integrity profile */
	t->integrity_supported = true;
	return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_determine_type(t);
	if (r) {
		DMERR("unable to determine table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_register_integrity(t);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

	r = dm_table_alloc_md_mempools(t, t->md);
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

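/*
 * Example: with 64-byte cache lines and 8-byte sector_t, KEYS_PER_NODE
 * is 8.  For a table whose index fits in one level (depth == 1), the
 * loop above scans the single leaf node (t->highs) directly and the
 * first key >= the requested sector selects the target; deeper trees
 * narrow the search by one cache-line-sized node per level.
 */
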
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0, num_devices = 0;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

	blk_set_stacking_limits(limits);

	while (i < dm_table_get_num_targets(table)) {
		blk_set_stacking_limits(&ti_limits);

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
			DMWARN("%s: adding target device "
			       "(start sect %llu len %llu) "
			       "caused an alignment inconsistency",
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Verify that all devices have an integrity profile that matches the
 * DM device's registered integrity profile.  If the profiles don't
 * match then unregister the DM device's integrity profile.
 */
static void dm_table_verify_integrity(struct dm_table *t)
{
	struct gendisk *template_disk = NULL;

	if (t->integrity_supported) {
		/*
		 * Verify that the original integrity profile
		 * matches all the devices in this table.
		 */
		template_disk = dm_table_get_integrity_disk(t);
		if (template_disk &&
		    blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
			return;
	}

	if (integrity_profile_exists(dm_disk(t->md))) {
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
		blk_integrity_unregister(dm_disk(t->md));
	}
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned long flush = (unsigned long) data;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->queue_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_flush_bios)
			continue;

		if (ti->flush_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
			return true;
	}

	return false;
}

static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/* Ensure that all targets supports discard_zeroes_data. */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (ti->discard_zeroes_data_unsupported)
			return false;
	}

	return true;
}

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
			     sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}

static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}

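/*
 * Note the asymmetry: "all devices" attributes (non-rotational, lack of
 * entropy contribution, SG merging) must hold for every underlying
 * device, whereas capability checks such as flush or discard support
 * succeed as soon as one target or device provides the feature.
 */
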
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_write_same_bios)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}

	return true;
}

static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Unless any target used by the table set discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard selectively must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_bios)
			continue;

		if (ti->discards_supported)
			return true;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return true;
	}

	return false;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
{
	bool wc = false, fua = false;

	/*
	 * Copy table's limits to the DM device's request_queue
	 */
	q->limits = *limits;

	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
		wc = true;
		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
			fua = true;
	}
	blk_queue_write_cache(q, wc, fua);

	if (!dm_table_discard_zeroes_data(t))
		q->limits.discard_zeroes_data = 0;

	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;

	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	dm_table_verify_integrity(t);

	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool.  Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag set, bios are passed to bio-based dm and queued to
	 * md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at the resume time.
	 */
	smp_mb();
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
			break;
		}
		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
1641 1642 1643
	if (!t)
		return;

1644 1645 1646 1647 1648 1649 1650 1651 1652
	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
L

void dm_table_postsuspend_targets(struct dm_table *t)
{
1657 1658 1659
	if (!t)
		return;

1660
	suspend_targets(t, POSTSUSPEND);
L
Linus Torvalds 已提交
1661 1662
}

1663
int dm_table_resume_targets(struct dm_table *t)
L
Linus Torvalds 已提交
1664
{
1665 1666 1667 1668 1669 1670 1671 1672 1673
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
1674 1675 1676
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
1677
			return r;
1678
		}
1679
	}
L
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}
1687 1688

	return 0;
L

1691 1692 1693 1694 1695 1696
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

L
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
P
	struct list_head *devices = dm_table_get_devices(t);
L

P
	list_for_each_entry(dd, devices, list) {
A

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
1713
				     bdevname(dd->dm_dev->bdev, b));
L
Linus Torvalds 已提交
1714 1715
	}

1716 1717 1718 1719
	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

L
}

M
{
	return t->md;
}
1727
EXPORT_SYMBOL(dm_table_get_md);
M
Mike Anderson 已提交
1728

1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue) {
		if (queue->mq_ops)
			blk_mq_run_hw_queues(queue, true);
		else {
			spin_lock_irqsave(queue->queue_lock, flags);
			blk_run_queue_async(queue);
			spin_unlock_irqrestore(queue->queue_lock, flags);
		}
	}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);