dm-table.c 40.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
3
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
L
Linus Torvalds 已提交
4 5 6 7 8 9 10 11 12 13 14
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
15
#include <linux/string.h>
L
Linus Torvalds 已提交
16 17
#include <linux/slab.h>
#include <linux/interrupt.h>
A
Arjan van de Ven 已提交
18
#include <linux/mutex.h>
19
#include <linux/delay.h>
A
Arun Sharma 已提交
20
#include <linux/atomic.h>
21
#include <linux/blk-mq.h>
L
Linus Torvalds 已提交
22

23 24
#define DM_MSG_PREFIX "table"

L
Linus Torvalds 已提交
25 26 27 28 29 30
#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
M
Mike Anderson 已提交
31
	struct mapped_device *md;
K
Kiyoshi Ueda 已提交
32
	unsigned type;
L
Linus Torvalds 已提交
33 34 35 36 37 38 39 40 41 42 43

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

44
	struct target_type *immutable_target_type;
45
	unsigned integrity_supported:1;
46
	unsigned singleton:1;
M
Mike Snitzer 已提交
47

L
Linus Torvalds 已提交
48 49 50 51 52
	/*
	 * Indicates the rw permissions for the new logical
	 * device.  This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
53
	fmode_t mode;
L
Linus Torvalds 已提交
54 55 56 57 58 59 60

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
K
Kiyoshi Ueda 已提交
61 62

	struct dm_md_mempools *mempools;
63 64

	struct list_head target_callbacks;
L
Linus Torvalds 已提交
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
};

/*
 * Similar to ceiling(log_size(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

/*
 * Calculate the index of the child node of the n'th node k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) - 1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
J
Joe Perches 已提交
145
	addr = vzalloc(size);
L
Linus Torvalds 已提交
146 147 148

	return addr;
}
149
EXPORT_SYMBOL(dm_vcalloc);
L
Linus Torvalds 已提交
150 151 152 153 154 155 156 157 158 159 160 161

/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;

	/*
	 * Allocate both the target array and offset array at once.
162 163
	 * Append an empty entry to catch sectors beyond the end of
	 * the device.
L
Linus Torvalds 已提交
164
	 */
165
	n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
L
Linus Torvalds 已提交
166 167 168 169 170 171
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

172
	memset(n_highs, -1, sizeof(*n_highs) * num);
L
Linus Torvalds 已提交
173 174 175 176 177 178 179 180 181
	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

182
int dm_table_create(struct dm_table **result, fmode_t mode,
M
Mike Anderson 已提交
183
		    unsigned num_targets, struct mapped_device *md)
L
Linus Torvalds 已提交
184
{
D
Dmitry Monakhov 已提交
185
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
L
Linus Torvalds 已提交
186 187 188 189 190

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
191
	INIT_LIST_HEAD(&t->target_callbacks);
L
Linus Torvalds 已提交
192 193 194 195 196 197

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

198 199 200 201 202
	if (!num_targets) {
		kfree(t);
		return -ENOMEM;
	}

L
Linus Torvalds 已提交
203 204 205 206 207 208
	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->mode = mode;
M
Mike Anderson 已提交
209
	t->md = md;
L
Linus Torvalds 已提交
210 211 212 213
	*result = t;
	return 0;
}

214
static void free_devices(struct list_head *devices, struct mapped_device *md)
L
Linus Torvalds 已提交
215 216 217
{
	struct list_head *tmp, *next;

P
Paul Jimenez 已提交
218
	list_for_each_safe(tmp, next, devices) {
M
Mikulas Patocka 已提交
219 220
		struct dm_dev_internal *dd =
		    list_entry(tmp, struct dm_dev_internal, list);
221 222 223
		DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
		       dm_device_name(md), dd->dm_dev->name);
		dm_put_table_device(md, dd->dm_dev);
L
Linus Torvalds 已提交
224 225 226 227
		kfree(dd);
	}
}

228
void dm_table_destroy(struct dm_table *t)
L
Linus Torvalds 已提交
229 230 231
{
	unsigned int i;

232 233 234
	if (!t)
		return;

235
	/* free the indexes */
L
Linus Torvalds 已提交
236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
252
	free_devices(&t->devices, t->md);
L
Linus Torvalds 已提交
253

K
Kiyoshi Ueda 已提交
254 255
	dm_free_md_mempools(t->mempools);

L
Linus Torvalds 已提交
256 257 258 259 260 261
	kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
M
Mikulas Patocka 已提交
262
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
L
Linus Torvalds 已提交
263
{
M
Mikulas Patocka 已提交
264
	struct dm_dev_internal *dd;
L
Linus Torvalds 已提交
265 266

	list_for_each_entry (dd, l, list)
267
		if (dd->dm_dev->bdev->bd_dev == dev)
L
Linus Torvalds 已提交
268 269 270 271 272 273
			return dd;

	return NULL;
}

/*
274
 * If possible, this checks an area of a destination device is invalid.
L
Linus Torvalds 已提交
275
 */
276 277
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
L
Linus Torvalds 已提交
278
{
279
	struct request_queue *q;
280 281 282 283
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
	sector_t dev_size =
		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
284
	unsigned short logical_block_size_sectors =
285
		limits->logical_block_size >> SECTOR_SHIFT;
286
	char b[BDEVNAME_SIZE];
M
Mike Anderson 已提交
287

288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303
	/*
	 * Some devices exist without request functions,
	 * such as loop devices not yet bound to backing files.
	 * Forbid the use of such devices.
	 */
	q = bdev_get_queue(bdev);
	if (!q || !q->make_request_fn) {
		DMWARN("%s: %s is not yet initialised: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
		return 1;
	}

M
Mike Anderson 已提交
304
	if (!dev_size)
305
		return 0;
M
Mike Anderson 已提交
306

307
	if ((start >= dev_size) || (start + len > dev_size)) {
308 309 310 311 312 313
		DMWARN("%s: %s too small for target: "
		       "start=%llu, len=%llu, dev_size=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       (unsigned long long)start,
		       (unsigned long long)len,
		       (unsigned long long)dev_size);
314
		return 1;
315 316 317
	}

	if (logical_block_size_sectors <= 1)
318
		return 0;
319 320 321

	if (start & (logical_block_size_sectors - 1)) {
		DMWARN("%s: start=%llu not aligned to h/w "
322
		       "logical block size %u of %s",
323 324
		       dm_device_name(ti->table->md),
		       (unsigned long long)start,
325
		       limits->logical_block_size, bdevname(bdev, b));
326
		return 1;
327 328
	}

329
	if (len & (logical_block_size_sectors - 1)) {
330
		DMWARN("%s: len=%llu not aligned to h/w "
331
		       "logical block size %u of %s",
332
		       dm_device_name(ti->table->md),
333
		       (unsigned long long)len,
334
		       limits->logical_block_size, bdevname(bdev, b));
335
		return 1;
336 337
	}

338
	return 0;
L
Linus Torvalds 已提交
339 340 341
}

/*
342
 * This upgrades the mode on an already open dm_dev, being
L
Linus Torvalds 已提交
343
 * careful to leave things as they were if we fail to reopen the
344 345
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
L
Linus Torvalds 已提交
346
 */
347
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
M
Mikulas Patocka 已提交
348
			struct mapped_device *md)
L
Linus Torvalds 已提交
349 350
{
	int r;
351
	struct dm_dev *old_dev, *new_dev;
L
Linus Torvalds 已提交
352

353
	old_dev = dd->dm_dev;
354

355 356
	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
				dd->dm_dev->mode | new_mode, &new_dev);
357 358
	if (r)
		return r;
L
Linus Torvalds 已提交
359

360 361
	dd->dm_dev = new_dev;
	dm_put_table_device(md, old_dev);
L
Linus Torvalds 已提交
362

363
	return 0;
L
Linus Torvalds 已提交
364 365 366 367 368 369
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
370 371
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result)
L
Linus Torvalds 已提交
372 373
{
	int r;
A
Andrew Morton 已提交
374
	dev_t uninitialized_var(dev);
M
Mikulas Patocka 已提交
375
	struct dm_dev_internal *dd;
L
Linus Torvalds 已提交
376
	unsigned int major, minor;
377
	struct dm_table *t = ti->table;
378
	char dummy;
L
Linus Torvalds 已提交
379

380
	BUG_ON(!t);
L
Linus Torvalds 已提交
381

382
	if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
L
Linus Torvalds 已提交
383 384 385 386 387 388
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
389 390 391 392 393 394
		struct block_device *bdev = lookup_bdev(path);

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dev = bdev->bd_dev;
		bdput(bdev);
L
Linus Torvalds 已提交
395 396 397 398 399 400 401 402
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

403
		if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
L
Linus Torvalds 已提交
404 405 406 407 408 409 410
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

411
	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
412
		r = upgrade_mode(dd, mode, t->md);
L
Linus Torvalds 已提交
413 414 415 416 417
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

418
	*result = dd->dm_dev;
L
Linus Torvalds 已提交
419 420
	return 0;
}
421
EXPORT_SYMBOL(dm_get_device);
L
Linus Torvalds 已提交
422

423 424
static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
L
Linus Torvalds 已提交
425
{
426 427
	struct queue_limits *limits = data;
	struct block_device *bdev = dev->bdev;
428
	struct request_queue *q = bdev_get_queue(bdev);
A
Alasdair G Kergon 已提交
429 430 431 432 433
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
434
		return 0;
A
Alasdair G Kergon 已提交
435
	}
436

437 438
	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
439 440 441 442 443 444
		       "physical_block_size=%u, logical_block_size=%u, "
		       "alignment_offset=%u, start=%llu",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,
		       q->limits.logical_block_size,
		       q->limits.alignment_offset,
445
		       (unsigned long long) start << SECTOR_SHIFT);
446

447 448 449 450
	/*
	 * Check if merge fn is supported.
	 * If not we'll force DM to use PAGE_SIZE or
	 * smaller I/O, just to be safe.
451
	 */
452
	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
453 454
		blk_limits_max_hw_sectors(limits,
					  (unsigned int) (PAGE_SIZE >> 9));
455
	return 0;
456
}
457

L
Linus Torvalds 已提交
458
/*
459
 * Decrement a device's use count and remove it if necessary.
L
Linus Torvalds 已提交
460
 */
M
Mikulas Patocka 已提交
461
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
L
Linus Torvalds 已提交
462
{
463 464 465
	int found = 0;
	struct list_head *devices = &ti->table->devices;
	struct dm_dev_internal *dd;
M
Mikulas Patocka 已提交
466

467 468 469 470 471 472 473 474 475 476 477
	list_for_each_entry(dd, devices, list) {
		if (dd->dm_dev == d) {
			found = 1;
			break;
		}
	}
	if (!found) {
		DMWARN("%s: device %s not in table devices list",
		       dm_device_name(ti->table->md), d->name);
		return;
	}
L
Linus Torvalds 已提交
478
	if (atomic_dec_and_test(&dd->count)) {
479
		dm_put_table_device(ti->table->md, d);
L
Linus Torvalds 已提交
480 481 482 483
		list_del(&dd->list);
		kfree(dd);
	}
}
484
EXPORT_SYMBOL(dm_put_device);
L
Linus Torvalds 已提交
485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
502 503 504 505 506 507 508
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
L
Linus Torvalds 已提交
509 510 511 512 513
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;
514
	gfp_t gfp;
L
Linus Torvalds 已提交
515

516 517 518 519 520 521 522 523
	if (*array_size) {
		new_size = *array_size * 2;
		gfp = GFP_KERNEL;
	} else {
		new_size = 8;
		gfp = GFP_NOIO;
	}
	argv = kmalloc(new_size * sizeof(*argv), gfp);
L
Linus Torvalds 已提交
524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;
542 543 544 545 546 547

	if (!input) {
		*argvp = NULL;
		return 0;
	}

L
Linus Torvalds 已提交
548 549 550 551 552 553
	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		/* Skip whitespace */
554
		start = skip_spaces(end);
L
Linus Torvalds 已提交
555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to remove any back-quotes */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array ? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

596 597 598 599 600 601 602
/*
 * Impose necessary and sufficient conditions on a devices's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
603 604
static int validate_hardware_logical_block_alignment(struct dm_table *table,
						 struct queue_limits *limits)
605 606 607 608 609 610
{
	/*
	 * This function uses arithmetic modulo the logical_block_size
	 * (in units of 512-byte sectors).
	 */
	unsigned short device_logical_block_size_sects =
611
		limits->logical_block_size >> SECTOR_SHIFT;
612 613 614 615 616 617 618 619 620 621 622 623 624

	/*
	 * Offset of the start of the next table entry, mod logical_block_size.
	 */
	unsigned short next_target_start = 0;

	/*
	 * Given an aligned bio that extends beyond the end of a
	 * target, how many sectors must the next target handle?
	 */
	unsigned short remaining = 0;

	struct dm_target *uninitialized_var(ti);
625
	struct queue_limits ti_limits;
626 627 628 629 630 631 632 633
	unsigned i = 0;

	/*
	 * Check each entry in the table in turn.
	 */
	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

634
		blk_set_stacking_limits(&ti_limits);
635 636 637 638 639 640

		/* combine all target devices' limits */
		if (ti->type->iterate_devices)
			ti->type->iterate_devices(ti, dm_set_device_limits,
						  &ti_limits);

641 642 643 644 645
		/*
		 * If the remaining sectors fall entirely within this
		 * table entry are they compatible with its logical_block_size?
		 */
		if (remaining < ti->len &&
646
		    remaining & ((ti_limits.logical_block_size >>
647 648 649 650 651 652 653 654 655 656 657 658
				  SECTOR_SHIFT) - 1))
			break;	/* Error */

		next_target_start =
		    (unsigned short) ((next_target_start + ti->len) &
				      (device_logical_block_size_sects - 1));
		remaining = next_target_start ?
		    device_logical_block_size_sects - next_target_start : 0;
	}

	if (remaining) {
		DMWARN("%s: table line %u (start sect %llu len %llu) "
659
		       "not aligned to h/w logical block size %u",
660 661 662
		       dm_device_name(table->md), i,
		       (unsigned long long) ti->begin,
		       (unsigned long long) ti->len,
663
		       limits->logical_block_size);
664 665 666 667 668 669
		return -EINVAL;
	}

	return 0;
}

L
Linus Torvalds 已提交
670 671 672 673 674 675 676
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

677 678 679 680 681 682
	if (t->singleton) {
		DMERR("%s: target type %s must appear alone in table",
		      dm_device_name(t->md), t->targets->type->name);
		return -EINVAL;
	}

683
	BUG_ON(t->num_targets >= t->num_allocated);
L
Linus Torvalds 已提交
684 685 686 687 688

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
689
		DMERR("%s: zero-length target", dm_device_name(t->md));
L
Linus Torvalds 已提交
690 691 692 693 694
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
695 696
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
L
Linus Torvalds 已提交
697 698 699
		return -EINVAL;
	}

700 701 702 703 704 705 706 707 708
	if (dm_target_needs_singleton(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: target type %s must appear alone in table",
			      dm_device_name(t->md), type);
			return -EINVAL;
		}
		t->singleton = 1;
	}

709 710 711 712 713 714
	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
		DMERR("%s: target type %s may not be included in read-only tables",
		      dm_device_name(t->md), type);
		return -EINVAL;
	}

715 716 717 718 719 720 721 722 723 724 725 726 727 728 729
	if (t->immutable_target_type) {
		if (t->immutable_target_type != tgt->type) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), t->immutable_target_type->name);
			return -EINVAL;
		}
	} else if (dm_target_is_immutable(tgt->type)) {
		if (t->num_targets) {
			DMERR("%s: immutable target type %s cannot be mixed with other target types",
			      dm_device_name(t->md), tgt->type->name);
			return -EINVAL;
		}
		t->immutable_target_type = tgt->type;
	}

L
Linus Torvalds 已提交
730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756
	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one ?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

757 758
	if (!tgt->num_discard_bios && tgt->discards_supported)
		DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
M
Mike Snitzer 已提交
759
		       dm_device_name(t->md), type);
M
Mike Snitzer 已提交
760

L
Linus Torvalds 已提交
761 762 763
	return 0;

 bad:
764
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
L
Linus Torvalds 已提交
765 766 767 768
	dm_put_target_type(tgt->type);
	return r;
}

769 770 771 772 773 774 775
/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
			     unsigned *value, char **error, unsigned grouped)
{
	const char *arg_str = dm_shift_arg(arg_set);
776
	char dummy;
777 778

	if (!arg_str ||
779
	    (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826
	    (*value < arg->min) ||
	    (*value > arg->max) ||
	    (grouped && arg_set->argc < *value)) {
		*error = arg->error;
		return -EINVAL;
	}

	return 0;
}

int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *value, char **error)
{
	return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
	BUG_ON(as->argc < num_args);
	as->argc -= num_args;
	as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);

827
static int dm_table_set_type(struct dm_table *t)
K
Kiyoshi Ueda 已提交
828 829
{
	unsigned i;
830
	unsigned bio_based = 0, request_based = 0, hybrid = 0;
831
	bool use_blk_mq = false;
K
Kiyoshi Ueda 已提交
832 833 834
	struct dm_target *tgt;
	struct dm_dev_internal *dd;
	struct list_head *devices;
835
	unsigned live_md_type = dm_get_md_type(t->md);
K
Kiyoshi Ueda 已提交
836 837 838

	for (i = 0; i < t->num_targets; i++) {
		tgt = t->targets + i;
839 840 841
		if (dm_target_hybrid(tgt))
			hybrid = 1;
		else if (dm_target_request_based(tgt))
K
Kiyoshi Ueda 已提交
842 843 844 845 846 847 848 849 850 851 852
			request_based = 1;
		else
			bio_based = 1;

		if (bio_based && request_based) {
			DMWARN("Inconsistent table: different target types"
			       " can't be mixed up");
			return -EINVAL;
		}
	}

853 854 855 856 857 858
	if (hybrid && !bio_based && !request_based) {
		/*
		 * The targets can work either way.
		 * Determine the type from the live device.
		 * Default to bio-based if device is new.
		 */
859 860
		if (live_md_type == DM_TYPE_REQUEST_BASED ||
		    live_md_type == DM_TYPE_MQ_REQUEST_BASED)
861 862 863 864 865
			request_based = 1;
		else
			bio_based = 1;
	}

K
Kiyoshi Ueda 已提交
866 867 868 869 870 871 872 873
	if (bio_based) {
		/* We must use this table as bio-based */
		t->type = DM_TYPE_BIO_BASED;
		return 0;
	}

	BUG_ON(!request_based); /* No targets in this table */

874 875 876 877 878 879 880 881 882 883 884
	/*
	 * Request-based dm supports only tables that have a single target now.
	 * To support multiple targets, request splitting support is needed,
	 * and that needs lots of changes in the block-layer.
	 * (e.g. request completion process for partial completion.)
	 */
	if (t->num_targets > 1) {
		DMWARN("Request-based dm doesn't support multiple targets yet");
		return -EINVAL;
	}

K
Kiyoshi Ueda 已提交
885 886 887
	/* Non-request-stackable devices can't be used for request-based dm */
	devices = dm_table_get_devices(t);
	list_for_each_entry(dd, devices, list) {
888 889 890 891 892
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);

		if (!blk_queue_stackable(q)) {
			DMERR("table load rejected: including"
			      " non-request-stackable devices");
K
Kiyoshi Ueda 已提交
893 894
			return -EINVAL;
		}
895 896 897 898 899 900 901 902 903 904 905 906 907

		if (q->mq_ops)
			use_blk_mq = true;
	}

	if (use_blk_mq) {
		/* verify _all_ devices in the table are blk-mq devices */
		list_for_each_entry(dd, devices, list)
			if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
				DMERR("table load rejected: not all devices"
				      " are blk-mq request-stackable");
				return -EINVAL;
			}
908
		t->type = DM_TYPE_MQ_REQUEST_BASED;
K
Kiyoshi Ueda 已提交
909

910 911 912
	} else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
		/* inherit live MD type */
		t->type = live_md_type;
K
Kiyoshi Ueda 已提交
913

914 915
	} else
		t->type = DM_TYPE_REQUEST_BASED;
K
Kiyoshi Ueda 已提交
916 917 918 919 920 921 922 923 924

	return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
	return t->type;
}

925 926 927 928 929
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
	return t->immutable_target_type;
}

K
Kiyoshi Ueda 已提交
930 931
bool dm_table_request_based(struct dm_table *t)
{
932 933 934 935 936 937 938 939 940
	unsigned table_type = dm_table_get_type(t);

	return (table_type == DM_TYPE_REQUEST_BASED ||
		table_type == DM_TYPE_MQ_REQUEST_BASED);
}

bool dm_table_mq_request_based(struct dm_table *t)
{
	return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
K
Kiyoshi Ueda 已提交
941 942
}

943
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
K
Kiyoshi Ueda 已提交
944 945
{
	unsigned type = dm_table_get_type(t);
M
Mikulas Patocka 已提交
946 947 948
	unsigned per_bio_data_size = 0;
	struct dm_target *tgt;
	unsigned i;
K
Kiyoshi Ueda 已提交
949 950 951 952 953 954

	if (unlikely(type == DM_TYPE_NONE)) {
		DMWARN("no table type is set, can't allocate mempools");
		return -EINVAL;
	}

M
Mikulas Patocka 已提交
955 956 957 958 959 960
	if (type == DM_TYPE_BIO_BASED)
		for (i = 0; i < t->num_targets; i++) {
			tgt = t->targets + i;
			per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
		}

961
	t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
K
Kiyoshi Ueda 已提交
962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978
	if (!t->mempools)
		return -ENOMEM;

	return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
	dm_free_md_mempools(t->mempools);
	t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
	return t->mempools;
}

L
Linus Torvalds 已提交
979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995
static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
J
Jun'ichi Nomura 已提交
996
	for (i = t->depth - 2; i >= 0; i--) {
L
Linus Torvalds 已提交
997 998 999 1000 1001 1002 1003 1004 1005 1006 1007
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
1008
static int dm_table_build_index(struct dm_table *t)
L
Linus Torvalds 已提交
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026
{
	int r = 0;
	unsigned int leaf_nodes;

	/* how many indexes will the btree have ? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041
/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
						    bool match_all)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd = NULL;
	struct gendisk *prev_disk = NULL, *template_disk = NULL;

	list_for_each_entry(dd, devices, list) {
1042
		template_disk = dd->dm_dev->bdev->bd_disk;
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063
		if (!blk_get_integrity(template_disk))
			goto no_integrity;
		if (!match_all && !blk_integrity_is_initialized(template_disk))
			continue; /* skip uninitialized profiles */
		else if (prev_disk &&
			 blk_integrity_compare(prev_disk, template_disk) < 0)
			goto no_integrity;
		prev_disk = template_disk;
	}

	return template_disk;

no_integrity:
	if (prev_disk)
		DMWARN("%s: integrity not set: %s and %s profile mismatch",
		       dm_device_name(t->md),
		       prev_disk->disk_name,
		       template_disk->disk_name);
	return NULL;
}

1064 1065
/*
 * Register the mapped device for blk_integrity support if
1066 1067 1068 1069 1070 1071 1072
 * the underlying devices have an integrity profile.  But all devices
 * may not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integity profile).
 * Stacked DM devices force a 2 stage integrity profile validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
1073 1074 1075
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
1076
	struct gendisk *template_disk = NULL;
1077

1078 1079 1080
	template_disk = dm_table_get_integrity_disk(t, false);
	if (!template_disk)
		return 0;
1081

1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101
	if (!blk_integrity_is_initialized(dm_disk(md))) {
		t->integrity_supported = 1;
		return blk_integrity_register(dm_disk(md), NULL);
	}

	/*
	 * If DM device already has an initalized integrity
	 * profile the new profile should not conflict.
	 */
	if (blk_integrity_is_initialized(template_disk) &&
	    blk_integrity_compare(dm_disk(md), template_disk) < 0) {
		DMWARN("%s: conflict with existing integrity profile: "
		       "%s profile mismatch",
		       dm_device_name(t->md),
		       template_disk->disk_name);
		return 1;
	}

	/* Preserve existing initialized integrity profile */
	t->integrity_supported = 1;
1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130
	return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
	int r;

	r = dm_table_set_type(t);
	if (r) {
		DMERR("unable to set table type");
		return r;
	}

	r = dm_table_build_index(t);
	if (r) {
		DMERR("unable to build btrees");
		return r;
	}

	r = dm_table_prealloc_integrity(t, t->md);
	if (r) {
		DMERR("could not register integrity profile.");
		return r;
	}

1131
	r = dm_table_alloc_md_mempools(t, t->md);
1132 1133 1134 1135 1136 1137
	if (r)
		DMERR("unable to allocate mempools");

	return r;
}

A
Arjan van de Ven 已提交
1138
static DEFINE_MUTEX(_event_lock);
L
Linus Torvalds 已提交
1139 1140 1141
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
A
Arjan van de Ven 已提交
1142
	mutex_lock(&_event_lock);
L
Linus Torvalds 已提交
1143 1144
	t->event_fn = fn;
	t->event_context = context;
A
Arjan van de Ven 已提交
1145
	mutex_unlock(&_event_lock);
L
Linus Torvalds 已提交
1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

A
Arjan van de Ven 已提交
1156
	mutex_lock(&_event_lock);
L
Linus Torvalds 已提交
1157 1158
	if (t->event_fn)
		t->event_fn(t->event_context);
A
Arjan van de Ven 已提交
1159
	mutex_unlock(&_event_lock);
L
Linus Torvalds 已提交
1160
}
1161
EXPORT_SYMBOL(dm_table_event);
L
Linus Torvalds 已提交
1162 1163 1164 1165 1166

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
1167
EXPORT_SYMBOL(dm_table_get_size);
L
Linus Torvalds 已提交
1168 1169 1170

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
1171
	if (index >= t->num_targets)
L
Linus Torvalds 已提交
1172 1173 1174 1175 1176 1177 1178
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
1179 1180 1181
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
L
Linus Torvalds 已提交
1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234
static int count_device(struct dm_target *ti, struct dm_dev *dev,
			sector_t start, sector_t len, void *data)
{
	unsigned *num_devices = data;

	(*num_devices)++;

	return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
	struct dm_target *uninitialized_var(ti);
	unsigned i = 0, num_devices = 0;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			return false;

		ti->type->iterate_devices(ti, count_device, &num_devices);
		if (num_devices)
			return false;
	}

	return true;
}

1235 1236 1237 1238 1239 1240 1241 1242 1243 1244
/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits)
{
	struct dm_target *uninitialized_var(ti);
	struct queue_limits ti_limits;
	unsigned i = 0;

1245
	blk_set_stacking_limits(limits);
1246 1247

	while (i < dm_table_get_num_targets(table)) {
1248
		blk_set_stacking_limits(&ti_limits);
1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260

		ti = dm_table_get_target(table, i++);

		if (!ti->type->iterate_devices)
			goto combine_limits;

		/*
		 * Combine queue limits of all the devices this target uses.
		 */
		ti->type->iterate_devices(ti, dm_set_device_limits,
					  &ti_limits);

1261 1262 1263 1264
		/* Set I/O hints portion of queue limits */
		if (ti->type->io_hints)
			ti->type->io_hints(ti, &ti_limits);

1265 1266 1267 1268
		/*
		 * Check each device area is consistent with the target's
		 * overall queue limits.
		 */
1269 1270
		if (ti->type->iterate_devices(ti, device_area_is_invalid,
					      &ti_limits))
1271 1272 1273 1274 1275 1276 1277 1278
			return -EINVAL;

combine_limits:
		/*
		 * Merge this target's queue limits into the overall limits
		 * for the table.
		 */
		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1279
			DMWARN("%s: adding target device "
1280
			       "(start sect %llu len %llu) "
1281
			       "caused an alignment inconsistency",
1282 1283 1284 1285 1286 1287 1288 1289
			       dm_device_name(table->md),
			       (unsigned long long) ti->begin,
			       (unsigned long long) ti->len);
	}

	return validate_hardware_logical_block_alignment(table, limits);
}

M
Martin K. Petersen 已提交
1290 1291
/*
 * Set the integrity profile for this device if all devices used have
1292 1293 1294 1295
 * matching profiles.  We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles.  Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
M
Martin K. Petersen 已提交
1296 1297 1298
 */
static void dm_table_set_integrity(struct dm_table *t)
{
1299
	struct gendisk *template_disk = NULL;
M
Martin K. Petersen 已提交
1300 1301 1302 1303

	if (!blk_get_integrity(dm_disk(t->md)))
		return;

1304
	template_disk = dm_table_get_integrity_disk(t, true);
1305 1306 1307 1308
	if (template_disk)
		blk_integrity_register(dm_disk(t->md),
				       blk_get_integrity(template_disk));
	else if (blk_integrity_is_initialized(dm_disk(t->md)))
1309 1310
		DMWARN("%s: device no longer has a valid integrity profile",
		       dm_device_name(t->md));
1311 1312 1313
	else
		DMWARN("%s: unable to establish an integrity profile",
		       dm_device_name(t->md));
M
Martin K. Petersen 已提交
1314 1315
}

1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned flush = (*(unsigned *)data);
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && (q->flush_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Require at least one underlying device to support flushes.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting flushes must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

1339
		if (!ti->num_flush_bios)
1340 1341
			continue;

1342 1343 1344
		if (ti->flush_supported)
			return 1;

1345 1346 1347 1348 1349 1350 1351 1352
		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
			return 1;
	}

	return 0;
}

1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368
static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/* Ensure that all targets supports discard_zeroes_data. */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (ti->discard_zeroes_data_unsupported)
			return 0;
	}

	return 1;
}

1369 1370 1371 1372 1373 1374 1375 1376
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_nonrot(q);
}

1377 1378 1379 1380 1381 1382 1383 1384
static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
			     sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !blk_queue_add_random(q);
}

1385 1386 1387 1388 1389 1390 1391 1392
static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}

1393 1394 1395 1396 1397 1398 1399 1400
static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
}

1401 1402
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
1403 1404 1405 1406 1407 1408 1409 1410
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->type->iterate_devices ||
1411
		    !ti->type->iterate_devices(ti, func, NULL))
1412 1413 1414 1415 1416 1417
			return 0;
	}

	return 1;
}

M
Mike Snitzer 已提交
1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

1434
		if (!ti->num_write_same_bios)
M
Mike Snitzer 已提交
1435 1436 1437
			return false;

		if (!ti->type->iterate_devices ||
M
Mike Snitzer 已提交
1438
		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
M
Mike Snitzer 已提交
1439 1440 1441 1442 1443 1444
			return false;
	}

	return true;
}

1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && blk_queue_discard(q);
}

static bool dm_table_supports_discards(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned i = 0;

	/*
	 * Unless any target used by the table set discards_supported,
	 * require at least one underlying device to support discards.
	 * t->devices includes internal dm devices such as mirror logs
	 * so we need to use iterate_devices here, which targets
	 * supporting discard selectively must provide.
	 */
	while (i < dm_table_get_num_targets(t)) {
		ti = dm_table_get_target(t, i++);

		if (!ti->num_discard_bios)
			continue;

		if (ti->discards_supported)
			return 1;

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, device_discard_capable, NULL))
			return 1;
	}

	return 0;
}

1482 1483
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits)
L
Linus Torvalds 已提交
1484
{
1485 1486
	unsigned flush = 0;

L
Linus Torvalds 已提交
1487
	/*
1488
	 * Copy table's limits to the DM device's request_queue
L
Linus Torvalds 已提交
1489
	 */
1490
	q->limits = *limits;
1491

M
Mike Snitzer 已提交
1492 1493 1494 1495 1496
	if (!dm_table_supports_discards(t))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

1497 1498 1499 1500 1501 1502 1503
	if (dm_table_supports_flush(t, REQ_FLUSH)) {
		flush |= REQ_FLUSH;
		if (dm_table_supports_flush(t, REQ_FUA))
			flush |= REQ_FUA;
	}
	blk_queue_flush(q, flush);

1504 1505 1506
	if (!dm_table_discard_zeroes_data(t))
		q->limits.discard_zeroes_data = 0;

1507 1508
	/* Ensure that all underlying devices are non-rotational. */
	if (dm_table_all_devices_attribute(t, device_is_nonrot))
1509 1510 1511 1512
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

M
Mike Snitzer 已提交
1513 1514
	if (!dm_table_supports_write_same(t))
		q->limits.max_write_same_sectors = 0;
M
Mike Snitzer 已提交
1515

1516 1517 1518 1519 1520
	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

1521 1522 1523 1524 1525
	if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
		queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);

M
Martin K. Petersen 已提交
1526
	dm_table_set_integrity(t);
K
Kiyoshi Ueda 已提交
1527

1528 1529 1530 1531 1532 1533 1534 1535 1536
	/*
	 * Determine whether or not this queue's I/O timings contribute
	 * to the entropy pool, Only request-based targets use this.
	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
	 * have it set.
	 */
	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

K
Kiyoshi Ueda 已提交
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548
	/*
	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
	 * visible to other CPUs because, once the flag is set, incoming bios
	 * are processed by request-based dm, which refers to the queue
	 * settings.
	 * Until the flag set, bios are passed to bio-based dm and queued to
	 * md->deferred where queue settings are not needed yet.
	 * Those bios are passed to request-based dm at the resume time.
	 */
	smp_mb();
	if (dm_table_request_based(t))
		queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
L
Linus Torvalds 已提交
1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

1561
fmode_t dm_table_get_mode(struct dm_table *t)
L
Linus Torvalds 已提交
1562 1563 1564
{
	return t->mode;
}
1565
EXPORT_SYMBOL(dm_table_get_mode);
L
Linus Torvalds 已提交
1566

1567 1568 1569 1570 1571 1572 1573
enum suspend_mode {
	PRESUSPEND,
	PRESUSPEND_UNDO,
	POSTSUSPEND,
};

static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
L
Linus Torvalds 已提交
1574 1575 1576 1577 1578
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
1579 1580 1581 1582 1583 1584 1585 1586 1587 1588
		switch (mode) {
		case PRESUSPEND:
			if (ti->type->presuspend)
				ti->type->presuspend(ti);
			break;
		case PRESUSPEND_UNDO:
			if (ti->type->presuspend_undo)
				ti->type->presuspend_undo(ti);
			break;
		case POSTSUSPEND:
L
Linus Torvalds 已提交
1589 1590
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
1591 1592
			break;
		}
L
Linus Torvalds 已提交
1593 1594 1595 1596 1597 1598
		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
1599 1600 1601
	if (!t)
		return;

1602 1603 1604 1605 1606 1607 1608 1609 1610
	suspend_targets(t, PRESUSPEND);
}

void dm_table_presuspend_undo_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, PRESUSPEND_UNDO);
L
Linus Torvalds 已提交
1611 1612 1613 1614
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
1615 1616 1617
	if (!t)
		return;

1618
	suspend_targets(t, POSTSUSPEND);
L
Linus Torvalds 已提交
1619 1620
}

1621
int dm_table_resume_targets(struct dm_table *t)
L
Linus Torvalds 已提交
1622
{
1623 1624 1625 1626 1627 1628 1629 1630 1631
	int i, r = 0;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (!ti->type->preresume)
			continue;

		r = ti->type->preresume(ti);
1632 1633 1634
		if (r) {
			DMERR("%s: %s: preresume failed, error = %d",
			      dm_device_name(t->md), ti->type->name, r);
1635
			return r;
1636
		}
1637
	}
L
Linus Torvalds 已提交
1638 1639 1640 1641 1642 1643 1644

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}
1645 1646

	return 0;
L
Linus Torvalds 已提交
1647 1648
}

1649 1650 1651 1652 1653 1654
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
	list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

L
Linus Torvalds 已提交
1655 1656
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
M
Mikulas Patocka 已提交
1657
	struct dm_dev_internal *dd;
P
Paul Jimenez 已提交
1658
	struct list_head *devices = dm_table_get_devices(t);
1659
	struct dm_target_callbacks *cb;
L
Linus Torvalds 已提交
1660 1661
	int r = 0;

P
Paul Jimenez 已提交
1662
	list_for_each_entry(dd, devices, list) {
1663
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
A
Alasdair G Kergon 已提交
1664 1665 1666 1667 1668 1669 1670
		char b[BDEVNAME_SIZE];

		if (likely(q))
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
		else
			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
				     dm_device_name(t->md),
1671
				     bdevname(dd->dm_dev->bdev, b));
L
Linus Torvalds 已提交
1672 1673
	}

1674 1675 1676 1677
	list_for_each_entry(cb, &t->target_callbacks, list)
		if (cb->congested_fn)
			r |= cb->congested_fn(cb, bdi_bits);

L
Linus Torvalds 已提交
1678 1679 1680
	return r;
}

M
Mike Anderson 已提交
1681 1682 1683 1684
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}
1685
EXPORT_SYMBOL(dm_table_get_md);
M
Mike Anderson 已提交
1686

1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698
void dm_table_run_md_queue_async(struct dm_table *t)
{
	struct mapped_device *md;
	struct request_queue *queue;
	unsigned long flags;

	if (!dm_table_request_based(t))
		return;

	md = dm_table_get_md(t);
	queue = dm_get_md_queue(md);
	if (queue) {
1699 1700 1701 1702 1703 1704 1705
		if (queue->mq_ops)
			blk_mq_run_hw_queues(queue, true);
		else {
			spin_lock_irqsave(queue->queue_lock, flags);
			blk_run_queue_async(queue);
			spin_unlock_irqrestore(queue->queue_lock, flags);
		}
1706 1707 1708 1709
	}
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);