dm-snap.c 68.6 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
9
#include <linux/delay.h>
L
Linus Torvalds 已提交
10 11 12 13
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
14
#include <linux/list_bl.h>
L
Linus Torvalds 已提交
15 16 17 18
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
V
vignesh babu 已提交
19
#include <linux/log2.h>
A
Alasdair G Kergon 已提交
20
#include <linux/dm-kcopyd.h>
L
Linus Torvalds 已提交
21

22 23
#include "dm.h"

24
#include "dm-exception-store.h"
L
Linus Torvalds 已提交
25

26 27
#define DM_MSG_PREFIX "snapshots"

M
Mikulas Patocka 已提交
28 29 30 31 32
static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

33 34 35 36 37
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

38 39 40 41
#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

42
struct dm_exception_table {
43 44
	uint32_t hash_mask;
	unsigned hash_shift;
45
	struct hlist_bl_head *table;
46 47 48
};

struct dm_snapshot {
49
	struct rw_semaphore lock;
50 51

	struct dm_dev *origin;
52 53 54
	struct dm_dev *cow;

	struct dm_target *ti;
55 56 57 58

	/* List of snapshots per Origin */
	struct list_head list;

59 60 61 62
	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
63 64
	int valid;

65 66 67 68 69 70 71
	/*
	 * The snapshot overflowed because of a write to the snapshot device.
	 * We don't have to invalidate the snapshot in this case, but we need
	 * to prevent further writes.
	 */
	int snapshot_overflowed;

72 73 74 75 76
	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

77 78 79
	spinlock_t pe_allocation_lock;

	/* Protected by "pe_allocation_lock" */
80 81 82 83 84 85 86 87 88
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
89
	struct rb_root out_of_order_tree;
90

91
	mempool_t pending_pool;
92

93 94
	struct dm_exception_table pending;
	struct dm_exception_table complete;
95 96 97 98 99 100 101

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

102 103 104 105
	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

106 107 108
	/* The on disk metadata handler */
	struct dm_exception_store *store;

109 110
	unsigned in_progress;
	struct wait_queue_head in_progress_wait;
111

112 113
	struct dm_kcopyd_client *kcopyd_client;

114 115 116 117 118 119
	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;
M
Mikulas Patocka 已提交
120

121 122 123 124 125 126 127 128 129 130 131 132 133
	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *   	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *   	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *   	=> don't load the target; abort.
	 *         (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
134 135 136 137
	bool merge_failed:1;

	bool discard_zeroes_cow:1;
	bool discard_passdown_origin:1;
138

139 140 141 142 143
	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
144 145 146 147 148

	/*
	 * Flush data after merge.
	 */
	struct bio flush_bio;
149 150
};

M
Mikulas Patocka 已提交
151 152 153 154 155 156 157 158 159
/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

160 161 162 163 164 165 166 167 168
/*
 * Maximum number of chunks being copied on write.
 *
 * The value was decided experimentally as a trade-off between memory
 * consumption, stalling the kernel's workqueues and maintaining a high enough
 * throughput.
 */
#define DEFAULT_COW_THRESHOLD 2048

169 170
static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
171 172
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");

173 174 175
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

176 177 178 179 180 181
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

182 183 184 185 186 187
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

188 189 190 191 192 193 194 195 196 197 198 199 200 201 202
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

A
Alasdair G Kergon 已提交
203
struct dm_snap_pending_exception {
204
	struct dm_exception e;
L
Linus Torvalds 已提交
205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
221

222 223 224 225 226 227
	/* There was copying error. */
	int copy_error;

	/* A sequence number, it is used for in-order completion. */
	sector_t exception_sequence;

228
	struct rb_node out_of_order_node;
229

230 231 232 233 234
	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
L
Linus Torvalds 已提交
235 236 237 238 239 240
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
241 242
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
L
Linus Torvalds 已提交
243

244 245 246 247 248
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

249 250 251 252 253 254 255 256 257 258 259 260 261
static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
262
{
M
Mikulas Patocka 已提交
263
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
264 265 266

	c->chunk = chunk;

267
	spin_lock_irq(&s->tracked_chunk_lock);
268 269
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
270
	spin_unlock_irq(&s->tracked_chunk_lock);
271 272
}

273
static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
274
{
275
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
276 277 278 279 280 281 282
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

283 284 285 286 287 288 289
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

290
	hlist_for_each_entry(c,
291 292 293 294 295 296 297 298 299 300 301 302
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

303 304 305 306 307 308 309 310 311 312
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

L
Linus Torvalds 已提交
313 314 315 316 317 318 319 320 321 322 323 324 325
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

326 327 328 329 330 331 332 333 334 335
/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
	struct dm_dev *dev;
	struct dm_target *ti;
	unsigned split_boundary;
	struct list_head hash_list;
};

L
Linus Torvalds 已提交
336 337 338 339 340 341 342
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
343
static struct list_head *_dm_origins;
L
Linus Torvalds 已提交
344 345
static struct rw_semaphore _origins_lock;

346 347 348 349
static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

L
Linus Torvalds 已提交
350 351 352 353
static int init_origin_hash(void)
{
	int i;

354 355
	_origins = kmalloc_array(ORIGIN_HASH_SIZE, sizeof(struct list_head),
				 GFP_KERNEL);
L
Linus Torvalds 已提交
356
	if (!_origins) {
357
		DMERR("unable to allocate memory for _origins");
L
Linus Torvalds 已提交
358 359 360 361
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
362

363 364 365
	_dm_origins = kmalloc_array(ORIGIN_HASH_SIZE,
				    sizeof(struct list_head),
				    GFP_KERNEL);
366 367 368 369 370 371 372 373
	if (!_dm_origins) {
		DMERR("unable to allocate memory for _dm_origins");
		kfree(_origins);
		return -ENOMEM;
	}
	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_dm_origins + i);

L
Linus Torvalds 已提交
374 375 376 377 378 379 380 381
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
382
	kfree(_dm_origins);
L
Linus Torvalds 已提交
383 384
}

A
Alasdair G Kergon 已提交
385
static unsigned origin_hash(struct block_device *bdev)
L
Linus Torvalds 已提交
386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432
static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct dm_origin *o;

	ol = &_dm_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->dev->bdev, origin))
			return o;

	return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
	list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
	list_del(&o->hash_list);
}

433 434 435 436 437
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
438 439
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
440
 *
441
 * Possible return values and states of snap_src and snap_dest.
442 443 444 445 446 447 448 449
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
450 451
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
452 453 454 455 456 457 458 459 460 461 462
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
463 464
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
465 466 467
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

468
		down_read(&s->lock);
469
		active = s->active;
470
		up_read(&s->lock);
471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
492
	struct dm_snapshot *snap_merge = NULL;
493 494

	/* Does snapshot need exceptions handed over to it? */
495 496
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
497 498 499 500 501 502 503 504 505 506 507 508 509
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

510 511 512 513 514 515 516 517 518 519 520 521 522 523
	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

M
Mikulas Patocka 已提交
524 525 526 527 528 529 530
	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

531 532 533 534 535 536 537 538 539 540 541 542 543 544
	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

L
Linus Torvalds 已提交
545 546 547
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
548 549 550 551
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
L
Linus Torvalds 已提交
552 553 554
 */
static int register_snapshot(struct dm_snapshot *snap)
{
555
	struct origin *o, *new_o = NULL;
L
Linus Torvalds 已提交
556
	struct block_device *bdev = snap->origin->bdev;
557
	int r = 0;
L
Linus Torvalds 已提交
558

559 560 561 562
	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

L
Linus Torvalds 已提交
563 564
	down_write(&_origins_lock);

565 566 567 568 569 570 571
	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
572 573 574
	if (o)
		kfree(new_o);
	else {
L
Linus Torvalds 已提交
575
		/* New origin */
576
		o = new_o;
L
Linus Torvalds 已提交
577 578 579 580 581 582 583 584

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603
	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);
L
Linus Torvalds 已提交
604 605 606 607 608 609 610 611 612 613 614 615

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
616
	if (o && list_empty(&o->snapshots)) {
L
Linus Torvalds 已提交
617 618 619 620 621 622 623 624 625
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
626 627
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
L
Linus Torvalds 已提交
628
 */
629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);

/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
	struct hlist_bl_head *complete_slot;
	struct hlist_bl_head *pending_slot;
};

static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
					 struct dm_exception_table_lock *lock)
{
	struct dm_exception_table *complete = &s->complete;
	struct dm_exception_table *pending = &s->pending;

	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
}

static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
	hlist_bl_lock(lock->complete_slot);
	hlist_bl_lock(lock->pending_slot);
}

static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
	hlist_bl_unlock(lock->pending_slot);
	hlist_bl_unlock(lock->complete_slot);
}

659 660
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
L
Linus Torvalds 已提交
661 662 663
{
	unsigned int i;

664
	et->hash_shift = hash_shift;
L
Linus Torvalds 已提交
665
	et->hash_mask = size - 1;
666 667
	et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
				   GFP_KERNEL);
L
Linus Torvalds 已提交
668 669 670 671
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
672
		INIT_HLIST_BL_HEAD(et->table + i);
L
Linus Torvalds 已提交
673 674 675 676

	return 0;
}

677 678
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
L
Linus Torvalds 已提交
679
{
680 681 682
	struct hlist_bl_head *slot;
	struct dm_exception *ex;
	struct hlist_bl_node *pos, *n;
L
Linus Torvalds 已提交
683 684 685 686 687 688
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

689
		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
L
Linus Torvalds 已提交
690 691 692
			kmem_cache_free(mem, ex);
	}

693
	kvfree(et->table);
L
Linus Torvalds 已提交
694 695
}

696
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
L
Linus Torvalds 已提交
697
{
698
	return (chunk >> et->hash_shift) & et->hash_mask;
L
Linus Torvalds 已提交
699 700
}

701
static void dm_remove_exception(struct dm_exception *e)
L
Linus Torvalds 已提交
702
{
703
	hlist_bl_del(&e->hash_list);
L
Linus Torvalds 已提交
704 705 706 707 708 709
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
710 711
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
L
Linus Torvalds 已提交
712
{
713 714
	struct hlist_bl_head *slot;
	struct hlist_bl_node *pos;
715
	struct dm_exception *e;
L
Linus Torvalds 已提交
716 717

	slot = &et->table[exception_hash(et, chunk)];
718
	hlist_bl_for_each_entry(e, pos, slot, hash_list)
719 720
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
L
Linus Torvalds 已提交
721 722 723 724 725
			return e;

	return NULL;
}

726
static struct dm_exception *alloc_completed_exception(gfp_t gfp)
L
Linus Torvalds 已提交
727
{
728
	struct dm_exception *e;
L
Linus Torvalds 已提交
729

730 731
	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
L
Linus Torvalds 已提交
732 733 734 735 736
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

737
static void free_completed_exception(struct dm_exception *e)
L
Linus Torvalds 已提交
738 739 740 741
{
	kmem_cache_free(exception_cache, e);
}

742
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
L
Linus Torvalds 已提交
743
{
744
	struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool,
745 746
							     GFP_NOIO);

747
	atomic_inc(&s->pending_exceptions_count);
748 749 750
	pe->snap = s;

	return pe;
L
Linus Torvalds 已提交
751 752
}

A
Alasdair G Kergon 已提交
753
static void free_pending_exception(struct dm_snap_pending_exception *pe)
L
Linus Torvalds 已提交
754
{
755 756
	struct dm_snapshot *s = pe->snap;

757
	mempool_free(pe, &s->pending_pool);
758
	smp_mb__before_atomic();
759
	atomic_dec(&s->pending_exceptions_count);
L
Linus Torvalds 已提交
760 761
}

762 763
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
764
{
765 766
	struct hlist_bl_head *l;
	struct hlist_bl_node *pos;
767
	struct dm_exception *e = NULL;
768 769 770 771 772 773 774 775

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
776
	hlist_bl_for_each_entry(e, pos, l, hash_list) {
777 778 779 780 781 782
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
783
			free_completed_exception(new_e);
784 785 786 787 788 789 790 791 792
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
793
			free_completed_exception(new_e);
794 795 796
			return;
		}

797
		if (new_e->old_chunk < e->old_chunk)
798 799 800 801
			break;
	}

out:
802 803 804 805 806 807 808 809 810 811 812 813 814
	if (!e) {
		/*
		 * Either the table doesn't support consecutive chunks or slot
		 * l is empty.
		 */
		hlist_bl_add_head(&new_e->hash_list, l);
	} else if (new_e->old_chunk < e->old_chunk) {
		/* Add before an existing exception */
		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
	} else {
		/* Add to l's tail: e is the last exception in this slot */
		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
	}
815 816
}

817 818 819 820 821
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
L
Linus Torvalds 已提交
822
{
823
	struct dm_exception_table_lock lock;
824
	struct dm_snapshot *s = context;
825
	struct dm_exception *e;
L
Linus Torvalds 已提交
826

827
	e = alloc_completed_exception(GFP_KERNEL);
L
Linus Torvalds 已提交
828 829 830 831
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
832 833

	/* Consecutive_count is implicitly initialised to zero */
L
Linus Torvalds 已提交
834
	e->new_chunk = new;
835

836 837 838 839 840 841 842 843 844
	/*
	 * Although there is no need to lock access to the exception tables
	 * here, if we don't then hlist_bl_add_head(), called by
	 * dm_insert_exception(), will complain about accessing the
	 * corresponding list without locking it first.
	 */
	dm_exception_table_lock_init(s, old, &lock);

	dm_exception_table_lock(&lock);
845
	dm_insert_exception(&s->complete, e);
846
	dm_exception_table_unlock(&lock);
847

L
Linus Torvalds 已提交
848 849 850
	return 0;
}

851 852 853 854
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
855
static uint32_t __minimum_chunk_size(struct origin *o)
856 857
{
	struct dm_snapshot *snap;
858
	unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
859 860 861

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
862 863
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);
864

865
	return (uint32_t) chunk_size;
866 867
}

L
Linus Torvalds 已提交
868 869 870 871 872 873 874
/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
875
	mem /= sizeof(struct hlist_bl_head);
L
Linus Torvalds 已提交
876 877 878 879 880 881 882

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
883
static int init_hash_tables(struct dm_snapshot *s)
L
Linus Torvalds 已提交
884
{
885
	sector_t hash_size, cow_dev_size, max_buckets;
L
Linus Torvalds 已提交
886 887 888 889 890

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
891
	cow_dev_size = get_dev_size(s->cow->bdev);
L
Linus Torvalds 已提交
892 893
	max_buckets = calc_max_buckets();

894
	hash_size = cow_dev_size >> s->store->chunk_shift;
L
Linus Torvalds 已提交
895 896
	hash_size = min(hash_size, max_buckets);

897 898
	if (hash_size < 64)
		hash_size = 64;
899
	hash_size = rounddown_pow_of_two(hash_size);
900 901
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
L
Linus Torvalds 已提交
902 903 904 905 906 907 908 909 910 911
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

912 913
	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
L
Linus Torvalds 已提交
914 915 916 917 918 919
		return -ENOMEM;
	}

	return 0;
}

M
Mikulas Patocka 已提交
920 921 922
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
923
	smp_mb__after_atomic();
M
Mikulas Patocka 已提交
924 925 926
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

927 928 929 930 931 932 933 934
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

M
Mikulas Patocka 已提交
935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}

987 988 989
static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
M
Mikulas Patocka 已提交
990
{
991 992 993
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
M
Mikulas Patocka 已提交
994

995
	down_write(&s->lock);
996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
1010
	up_write(&s->lock);
1011 1012
	if (b)
		flush_bios(b);
M
Mikulas Patocka 已提交
1013 1014 1015 1016

	return r;
}

1017 1018 1019
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

M
Mikulas Patocka 已提交
1020 1021 1022
static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

M
Mikulas Patocka 已提交
1043 1044
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
1045
	int i, linear_chunks;
M
Mikulas Patocka 已提交
1046 1047
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
1048
	sector_t io_size;
1049
	uint64_t previous_count;
M
Mikulas Patocka 已提交
1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

1063 1064 1065
	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
1066
		if (linear_chunks < 0) {
M
Mikulas Patocka 已提交
1067 1068
			DMERR("Read error in exception store: "
			      "shutting down merge");
1069
			down_write(&s->lock);
1070
			s->merge_failed = true;
1071
			up_write(&s->lock);
1072
		}
M
Mikulas Patocka 已提交
1073 1074 1075
		goto shut;
	}

1076 1077 1078 1079 1080 1081 1082 1083 1084
	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;
M
Mikulas Patocka 已提交
1085 1086 1087

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
1088
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
M
Mikulas Patocka 已提交
1089 1090 1091 1092 1093

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

1094 1095 1096 1097 1098 1099 1100 1101 1102 1103
	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
1104
	while (origin_write_extent(s, dest.sector, io_size)) {
1105 1106 1107 1108 1109 1110 1111
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

1112
	down_write(&s->lock);
1113
	s->first_merging_chunk = old_chunk;
1114
	s->num_merging_chunks = linear_chunks;
1115
	up_write(&s->lock);
1116

1117 1118 1119
	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);
1120

M
Mikulas Patocka 已提交
1121 1122 1123 1124 1125 1126 1127
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

1128 1129
static void error_bios(struct bio *bio);

1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140
static int flush_data(struct dm_snapshot *s)
{
	struct bio *flush_bio = &s->flush_bio;

	bio_reset(flush_bio);
	bio_set_dev(flush_bio, s->origin->bdev);
	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	return submit_bio_wait(flush_bio);
}

M
Mikulas Patocka 已提交
1141 1142 1143
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
1144
	struct bio *b = NULL;
M
Mikulas Patocka 已提交
1145 1146 1147 1148 1149 1150 1151 1152 1153

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

1154 1155 1156 1157 1158
	if (flush_data(s) < 0) {
		DMERR("Flush after merge failed: shutting down merge");
		goto shut;
	}

1159 1160
	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
M
Mikulas Patocka 已提交
1161 1162 1163 1164
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

1165 1166 1167
	if (remove_single_exception_chunk(s) < 0)
		goto shut;

M
Mikulas Patocka 已提交
1168 1169 1170 1171 1172
	snapshot_merge_next_chunks(s);

	return;

shut:
1173
	down_write(&s->lock);
1174
	s->merge_failed = true;
1175
	b = __release_queued_bios_after_merge(s);
1176
	up_write(&s->lock);
1177 1178
	error_bios(b);

M
Mikulas Patocka 已提交
1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
1194
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
M
Mikulas Patocka 已提交
1195 1196 1197
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247
static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
				   struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, 2, "Invalid number of feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "discard_zeroes_cow"))
			s->discard_zeroes_cow = true;

		else if (!strcasecmp(arg_name, "discard_passdown_origin"))
			s->discard_passdown_origin = true;

		else {
			ti->error = "Unrecognised feature requested";
			r = -EINVAL;
			break;
		}
	}

	if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
		/*
		 * TODO: really these are disjoint.. but ti->num_discard_bios
		 * and dm_bio_get_target_bio_nr() require rigid constraints.
		 */
		ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
		r = -EINVAL;
	}

	return r;
}

L
Linus Torvalds 已提交
1248
/*
1249 1250
 * Construct a snapshot mapping:
 * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
L
Linus Torvalds 已提交
1251 1252 1253 1254
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
1255
	struct dm_arg_set as;
1256
	int i;
L
Linus Torvalds 已提交
1257
	int r = -EINVAL;
1258
	char *origin_path, *cow_path;
1259
	dev_t origin_dev, cow_dev;
1260
	unsigned args_used, num_flush_bios = 1;
1261
	fmode_t origin_mode = FMODE_READ;
L
Linus Torvalds 已提交
1262

1263 1264
	if (argc < 4) {
		ti->error = "requires 4 or more arguments";
L
Linus Torvalds 已提交
1265
		r = -EINVAL;
1266
		goto bad;
L
Linus Torvalds 已提交
1267 1268
	}

1269
	if (dm_target_is_snapshot_merge(ti)) {
1270
		num_flush_bios = 2;
1271 1272 1273
		origin_mode = FMODE_WRITE;
	}

1274
	s = kzalloc(sizeof(*s), GFP_KERNEL);
1275
	if (!s) {
J
Jonathan Brassow 已提交
1276
		ti->error = "Cannot allocate private snapshot structure";
1277 1278 1279 1280
		r = -ENOMEM;
		goto bad;
	}

1281 1282 1283 1284 1285 1286 1287
	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 4);
	r = parse_snapshot_features(&as, s, ti);
	if (r)
		goto bad_features;

1288 1289 1290 1291 1292 1293 1294 1295 1296
	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}
1297
	origin_dev = s->origin->bdev->bd_dev;
1298

1299 1300 1301 1302
	cow_path = argv[0];
	argv++;
	argc--;

1303 1304 1305 1306 1307 1308 1309
	cow_dev = dm_get_dev_t(cow_path);
	if (cow_dev && cow_dev == origin_dev) {
		ti->error = "COW device cannot be the same as origin device";
		r = -EINVAL;
		goto bad_cow;
	}

1310
	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
1311 1312 1313 1314 1315 1316
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
1317 1318
	if (r) {
		ti->error = "Couldn't create exception store";
L
Linus Torvalds 已提交
1319
		r = -EINVAL;
1320
		goto bad_store;
L
Linus Torvalds 已提交
1321 1322
	}

1323 1324 1325
	argv += args_used;
	argc -= args_used;

1326
	s->ti = ti;
L
Linus Torvalds 已提交
1327
	s->valid = 1;
1328
	s->snapshot_overflowed = 0;
1329
	s->active = 0;
1330
	atomic_set(&s->pending_exceptions_count, 0);
1331
	spin_lock_init(&s->pe_allocation_lock);
1332 1333
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
1334
	s->out_of_order_tree = RB_ROOT;
1335
	init_rwsem(&s->lock);
1336
	INIT_LIST_HEAD(&s->list);
1337
	spin_lock_init(&s->pe_lock);
M
Mikulas Patocka 已提交
1338
	s->state_bits = 0;
1339
	s->merge_failed = false;
1340 1341 1342
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);
1343
	bio_init(&s->flush_bio, NULL, 0);
L
Linus Torvalds 已提交
1344 1345

	/* Allocate hash table for COW data */
1346
	if (init_hash_tables(s)) {
L
Linus Torvalds 已提交
1347 1348
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
1349
		goto bad_hash_tables;
L
Linus Torvalds 已提交
1350 1351
	}

1352
	init_waitqueue_head(&s->in_progress_wait);
1353

1354
	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1355 1356
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
L
Linus Torvalds 已提交
1357
		ti->error = "Could not create kcopyd client";
1358
		goto bad_kcopyd;
L
Linus Torvalds 已提交
1359 1360
	}

1361 1362
	r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache);
	if (r) {
1363
		ti->error = "Could not allocate mempool for pending exceptions";
1364
		goto bad_pending_pool;
1365 1366
	}

1367 1368 1369 1370 1371
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

1372
	ti->private = s;
1373
	ti->num_flush_bios = num_flush_bios;
1374 1375
	if (s->discard_zeroes_cow)
		ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
1376
	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

1400 1401
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
1402
	if (r < 0) {
1403
		ti->error = "Failed to read snapshot metadata";
1404
		goto bad_read_metadata;
1405 1406 1407
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
1408
	}
1409

1410 1411
	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
1412
		r = -EINVAL;
1413
		goto bad_read_metadata;
L
Linus Torvalds 已提交
1414
	}
1415 1416 1417 1418

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;
L
Linus Torvalds 已提交
1419 1420 1421

	return 0;

1422 1423
bad_read_metadata:
	unregister_snapshot(s);
1424
bad_load_and_register:
1425
	mempool_exit(&s->pending_pool);
1426
bad_pending_pool:
H
Heinz Mauelshagen 已提交
1427
	dm_kcopyd_client_destroy(s->kcopyd_client);
1428
bad_kcopyd:
1429 1430
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
1431
bad_hash_tables:
1432 1433 1434 1435
	dm_exception_store_destroy(s->store);
bad_store:
	dm_put_device(ti, s->cow);
bad_cow:
1436 1437
	dm_put_device(ti, s->origin);
bad_origin:
1438
bad_features:
1439 1440
	kfree(s);
bad:
L
Linus Torvalds 已提交
1441 1442 1443
	return r;
}

1444 1445
static void __free_exceptions(struct dm_snapshot *s)
{
H
Heinz Mauelshagen 已提交
1446
	dm_kcopyd_client_destroy(s->kcopyd_client);
1447 1448
	s->kcopyd_client = NULL;

1449 1450
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
1451 1452
}

1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
1470
	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
1471 1472 1473 1474 1475
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

1476
	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
1477
	snap_dest->valid = snap_src->valid;
1478
	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
1479 1480 1481 1482 1483 1484 1485

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

L
Linus Torvalds 已提交
1486 1487
static void snapshot_dtr(struct dm_target *ti)
{
1488 1489 1490
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
A
Alasdair G Kergon 已提交
1491
	struct dm_snapshot *s = ti->private;
1492
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
L
Linus Torvalds 已提交
1493

1494 1495
	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
1496
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1497
	if (snap_src && snap_dest && (s == snap_src)) {
1498
		down_write(&snap_dest->lock);
1499
		snap_dest->valid = 0;
1500
		up_write(&snap_dest->lock);
1501 1502 1503 1504
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

M
Mikulas Patocka 已提交
1505 1506 1507
	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

1508 1509
	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
L
Linus Torvalds 已提交
1510 1511
	unregister_snapshot(s);

1512
	while (atomic_read(&s->pending_exceptions_count))
1513
		msleep(1);
1514
	/*
1515
	 * Ensure instructions in mempool_exit aren't reordered
1516 1517 1518 1519
	 * before atomic_read.
	 */
	smp_mb();

1520 1521 1522 1523 1524
#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

1525
	__free_exceptions(s);
L
Linus Torvalds 已提交
1526

1527
	mempool_exit(&s->pending_pool);
1528

1529
	dm_exception_store_destroy(s->store);
1530

1531 1532
	bio_uninit(&s->flush_bio);

1533 1534
	dm_put_device(ti, s->cow);

1535 1536
	dm_put_device(ti, s->origin);

1537 1538
	WARN_ON(s->in_progress);

L
Linus Torvalds 已提交
1539 1540 1541
	kfree(s);
}

1542 1543
static void account_start_copy(struct dm_snapshot *s)
{
1544 1545 1546
	spin_lock(&s->in_progress_wait.lock);
	s->in_progress++;
	spin_unlock(&s->in_progress_wait.lock);
1547 1548 1549 1550
}

static void account_end_copy(struct dm_snapshot *s)
{
1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584
	spin_lock(&s->in_progress_wait.lock);
	BUG_ON(!s->in_progress);
	s->in_progress--;
	if (likely(s->in_progress <= cow_threshold) &&
	    unlikely(waitqueue_active(&s->in_progress_wait)))
		wake_up_locked(&s->in_progress_wait);
	spin_unlock(&s->in_progress_wait.lock);
}

static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
	if (unlikely(s->in_progress > cow_threshold)) {
		spin_lock(&s->in_progress_wait.lock);
		if (likely(s->in_progress > cow_threshold)) {
			/*
			 * NOTE: this throttle doesn't account for whether
			 * the caller is servicing an IO that will trigger a COW
			 * so excess throttling may result for chunks not required
			 * to be COW'd.  But if cow_threshold was reached, extra
			 * throttling is unlikely to negatively impact performance.
			 */
			DECLARE_WAITQUEUE(wait, current);
			__add_wait_queue(&s->in_progress_wait, &wait);
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&s->in_progress_wait.lock);
			if (unlock_origins)
				up_read(&_origins_lock);
			io_schedule();
			remove_wait_queue(&s->in_progress_wait, &wait);
			return false;
		}
		spin_unlock(&s->in_progress_wait.lock);
	}
	return true;
1585 1586
}

L
Linus Torvalds 已提交
1587 1588 1589 1590 1591 1592 1593 1594 1595 1596
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
1597
		submit_bio_noacct(bio);
L
Linus Torvalds 已提交
1598 1599 1600 1601
		bio = n;
	}
}

1602
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
1615
		r = do_origin(s->origin, bio, false);
1616
		if (r == DM_MAPIO_REMAPPED)
1617
			submit_bio_noacct(bio);
1618 1619 1620 1621
		bio = n;
	}
}

L
Linus Torvalds 已提交
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
1632
		bio_io_error(bio);
L
Linus Torvalds 已提交
1633 1634 1635 1636
		bio = n;
	}
}

1637
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1638 1639 1640 1641 1642 1643 1644 1645 1646
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

1647 1648
	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);
1649 1650 1651

	s->valid = 0;

1652
	dm_table_event(s->ti->table);
1653 1654
}

1655 1656 1657 1658 1659 1660 1661
static void invalidate_snapshot(struct dm_snapshot *s, int err)
{
	down_write(&s->lock);
	__invalidate_snapshot(s, err);
	up_write(&s->lock);
}

1662
static void pending_complete(void *context, int success)
L
Linus Torvalds 已提交
1663
{
1664
	struct dm_snap_pending_exception *pe = context;
1665
	struct dm_exception *e;
L
Linus Torvalds 已提交
1666
	struct dm_snapshot *s = pe->snap;
1667 1668
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
1669
	struct bio *full_bio = NULL;
1670
	struct dm_exception_table_lock lock;
1671
	int error = 0;
L
Linus Torvalds 已提交
1672

1673 1674
	dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);

1675 1676
	if (!success) {
		/* Read/write error - snapshot is unusable */
1677
		invalidate_snapshot(s, -EIO);
1678
		error = 1;
1679 1680

		dm_exception_table_lock(&lock);
1681 1682 1683
		goto out;
	}

1684
	e = alloc_completed_exception(GFP_NOIO);
1685
	if (!e) {
1686
		invalidate_snapshot(s, -ENOMEM);
1687
		error = 1;
1688 1689

		dm_exception_table_lock(&lock);
1690 1691 1692
		goto out;
	}
	*e = pe->e;
L
Linus Torvalds 已提交
1693

1694
	down_read(&s->lock);
1695
	dm_exception_table_lock(&lock);
1696
	if (!s->valid) {
1697
		up_read(&s->lock);
1698
		free_completed_exception(e);
1699
		error = 1;
1700

1701
		goto out;
L
Linus Torvalds 已提交
1702 1703
	}

1704
	/*
1705 1706 1707 1708 1709
	 * Add a proper exception. After inserting the completed exception all
	 * subsequent snapshot reads to this chunk will be redirected to the
	 * COW device.  This ensures that we do not starve. Moreover, as long
	 * as the pending exception exists, neither origin writes nor snapshot
	 * merging can overwrite the chunk in origin.
1710
	 */
1711
	dm_insert_exception(&s->complete, e);
1712
	up_read(&s->lock);
1713

1714 1715
	/* Wait for conflicting reads to drain */
	if (__chunk_is_tracked(s, pe->e.old_chunk)) {
1716
		dm_exception_table_unlock(&lock);
1717
		__check_for_conflicting_io(s, pe->e.old_chunk);
1718
		dm_exception_table_lock(&lock);
1719 1720
	}

J
Jonathan Brassow 已提交
1721
out:
1722
	/* Remove the in-flight exception from the list */
1723
	dm_remove_exception(&pe->e);
1724 1725 1726

	dm_exception_table_unlock(&lock);

1727
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
1728
	origin_bios = bio_list_get(&pe->origin_bios);
1729
	full_bio = pe->full_bio;
1730
	if (full_bio)
1731
		full_bio->bi_end_io = pe->full_bio_end_io;
1732 1733
	increment_pending_exceptions_done_count();

1734
	/* Submit any pending write bios */
1735 1736 1737
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
1738
		error_bios(snapshot_bios);
1739 1740
	} else {
		if (full_bio)
1741
			bio_endio(full_bio);
1742
		flush_bios(snapshot_bios);
1743
	}
1744

1745
	retry_origin_bios(s, origin_bios);
1746 1747

	free_pending_exception(pe);
L
Linus Torvalds 已提交
1748 1749
}

1750 1751 1752 1753
static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

1754 1755 1756
	/* Update the metadata if we are persistent */
	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
					 pending_complete, pe);
1757 1758
}

L
Linus Torvalds 已提交
1759 1760 1761 1762
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
1763
static void copy_callback(int read_err, unsigned long write_err, void *context)
L
Linus Torvalds 已提交
1764
{
A
Alasdair G Kergon 已提交
1765
	struct dm_snap_pending_exception *pe = context;
L
Linus Torvalds 已提交
1766 1767
	struct dm_snapshot *s = pe->snap;

1768
	pe->copy_error = read_err || write_err;
L
Linus Torvalds 已提交
1769

1770
	if (pe->exception_sequence == s->exception_complete_sequence) {
1771 1772
		struct rb_node *next;

1773 1774 1775
		s->exception_complete_sequence++;
		complete_exception(pe);

1776 1777 1778 1779
		next = rb_first(&s->out_of_order_tree);
		while (next) {
			pe = rb_entry(next, struct dm_snap_pending_exception,
					out_of_order_node);
1780 1781
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
1782
			next = rb_next(next);
1783
			s->exception_complete_sequence++;
1784
			rb_erase(&pe->out_of_order_node, &s->out_of_order_tree);
1785
			complete_exception(pe);
1786
			cond_resched();
1787 1788
		}
	} else {
1789 1790
		struct rb_node *parent = NULL;
		struct rb_node **p = &s->out_of_order_tree.rb_node;
1791 1792
		struct dm_snap_pending_exception *pe2;

1793 1794 1795 1796 1797 1798 1799 1800 1801
		while (*p) {
			pe2 = rb_entry(*p, struct dm_snap_pending_exception, out_of_order_node);
			parent = *p;

			BUG_ON(pe->exception_sequence == pe2->exception_sequence);
			if (pe->exception_sequence < pe2->exception_sequence)
				p = &((*p)->rb_left);
			else
				p = &((*p)->rb_right);
1802
		}
1803 1804 1805

		rb_link_node(&pe->out_of_order_node, parent, p);
		rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
1806
	}
1807
	account_end_copy(s);
L
Linus Torvalds 已提交
1808 1809 1810 1811 1812
}

/*
 * Dispatches the copy operation to kcopyd.
 */
A
Alasdair G Kergon 已提交
1813
static void start_copy(struct dm_snap_pending_exception *pe)
L
Linus Torvalds 已提交
1814 1815
{
	struct dm_snapshot *s = pe->snap;
H
Heinz Mauelshagen 已提交
1816
	struct dm_io_region src, dest;
L
Linus Torvalds 已提交
1817 1818 1819 1820 1821 1822
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
1823
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
1824
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
L
Linus Torvalds 已提交
1825

1826
	dest.bdev = s->cow->bdev;
1827
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
L
Linus Torvalds 已提交
1828 1829 1830
	dest.count = src.count;

	/* Hand over to kcopyd */
1831
	account_start_copy(s);
J
Jonathan Brassow 已提交
1832
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
L
Linus Torvalds 已提交
1833 1834
}

1835
static void full_bio_end_io(struct bio *bio)
1836 1837 1838
{
	void *callback_data = bio->bi_private;

1839
	dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;

1851
	account_start_copy(s);
1852 1853 1854 1855 1856 1857
	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

1858
	submit_bio_noacct(bio);
1859 1860
}

1861 1862 1863
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
1864
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1865 1866 1867 1868 1869 1870 1871

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Inserts a pending exception into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__insert_pending_exception(struct dm_snapshot *s,
			   struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	spin_lock(&s->pe_allocation_lock);
	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		spin_unlock(&s->pe_allocation_lock);
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;
	spin_unlock(&s->pe_allocation_lock);

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on the chunk's pending exception table slot
 * before calling this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	return __insert_pending_exception(s, pe, chunk);
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	/*
	 * Redirect the bio to the COW device: the start of the remapped
	 * chunk plus the bio's original offset within the chunk.
	 */
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}

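/*
 * Discard support ("discard_zeroes_cow"): a discard that exactly covers a
 * chunk which has already been remapped is turned into a dm_kcopyd_zero()
 * of that chunk in the COW device; the discard itself is never issued.
 */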
static void zero_callback(int read_err, unsigned long write_err, void *context)
{
	struct bio *bio = context;
	struct dm_snapshot *s = bio->bi_private;

	account_end_copy(s);
	bio->bi_status = write_err ? BLK_STS_IOERR : 0;
	bio_endio(bio);
}

static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
			   struct bio *bio, chunk_t chunk)
{
	struct dm_io_region dest;

	dest.bdev = s->cow->bdev;
	dest.sector = bio->bi_iter.bi_sector;
	dest.count = s->store->chunk_size;

	account_start_copy(s);
	WARN_ON_ONCE(bio->bi_private);
	bio->bi_private = s;
	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
}

static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(s->store->chunk_size << SECTOR_SHIFT);
}

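/*
 * Map I/O submitted to the snapshot device.  Reads of chunks that have
 * already been remapped, and all writes, end up on the COW device; a write
 * to a chunk that has not been remapped yet allocates a pending exception
 * and is held back until the chunk has been copied from the origin.  Reads
 * of unremapped chunks pass straight through to the origin device.
 */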
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;
	struct dm_exception_table_lock lock;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	dm_exception_table_lock_init(s, chunk, &lock);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return DM_MAPIO_KILL;

	if (bio_data_dir(bio) == WRITE) {
		while (unlikely(!wait_for_in_progress(s, false)))
			; /* wait_for_in_progress() has slept */
	}

	down_read(&s->lock);
	dm_exception_table_lock(&lock);

	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
	    bio_data_dir(bio) == WRITE)) {
		r = DM_MAPIO_KILL;
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
			/*
			 * passdown discard to origin (without triggering
			 * snapshot exceptions via do_origin; doing so would
			 * defeat the goal of freeing space in origin that is
			 * implied by the "discard_passdown_origin" feature)
			 */
			bio_set_dev(bio, s->origin->bdev);
			track_chunk(s, bio, chunk);
			goto out_unlock;
		}
		/* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
		    io_overlaps_chunk(s, bio)) {
			dm_exception_table_unlock(&lock);
			up_read(&s->lock);
			zero_exception(s, e, bio, chunk);
			r = DM_MAPIO_SUBMITTED; /* discard is not issued */
			goto out;
		}
		goto out_unlock;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/*
		 * If no exception exists, complete discard immediately
		 * otherwise it'll trigger copy-out.
		 */
		bio_endio(bio);
		r = DM_MAPIO_SUBMITTED;
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_data_dir(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(s);
			dm_exception_table_lock(&lock);

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				dm_exception_table_unlock(&lock);
				up_read(&s->lock);

				down_write(&s->lock);

				if (s->store->userspace_supports_overflow) {
					if (s->valid && !s->snapshot_overflowed) {
						s->snapshot_overflowed = 1;
						DMERR("Snapshot overflowed: Unable to allocate exception.");
					}
				} else
					__invalidate_snapshot(s, -ENOMEM);
				up_write(&s->lock);

				r = DM_MAPIO_KILL;
				goto out;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started && io_overlaps_chunk(s, bio)) {
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by the exception table lock */
			pe->started = 1;

			dm_exception_table_unlock(&lock);
			up_read(&s->lock);

			start_copy(pe);
			goto out;
		}
	} else {
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}

out_unlock:
	dm_exception_table_unlock(&lock);
	up_read(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		/* Once merging, discards no longer effect change */
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_data_dir(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio_set_dev(bio, s->origin->bdev);
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_data_dir(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio_set_dev(bio, s->origin->bdev);

	if (bio_data_dir(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio, false);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

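/*
 * Completion hook shared by the snapshot and snapshot-merge targets:
 * release the chunk tracking taken via track_chunk() in the map path.
 */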
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
		blk_status_t *error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return DM_ENDIO_DONE;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

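/*
 * Exception handover: if a newly loaded snapshot shares its COW device with
 * an existing snapshot, the existing exceptions are handed over to the new
 * snapshot when it is resumed.  preresume refuses to resume the handover
 * source itself and refuses to resume the destination until the source has
 * been suspended.
 */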
static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

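/*
 * Complete any pending exception handover at resume time, suspending the
 * origin (and pausing a running merge) while the exception tables are
 * swapped, then mark the snapshot active.
 */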
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
	struct dm_origin *o;
	struct mapped_device *origin_md = NULL;
	bool must_restart_merging = false;

	down_read(&_origins_lock);

	o = __lookup_dm_origin(s->origin->bdev);
	if (o)
		origin_md = dm_table_get_md(o->ti->table);
	if (!origin_md) {
		(void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
		if (snap_merging)
			origin_md = dm_table_get_md(snap_merging->ti->table);
	}
	if (origin_md == dm_table_get_md(ti->table))
		origin_md = NULL;
	if (origin_md) {
		if (dm_hold(origin_md))
			origin_md = NULL;
	}

	up_read(&_origins_lock);

	if (origin_md) {
		dm_internal_suspend_fast(origin_md);
		if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
			must_restart_merging = true;
			stop_merge(snap_merging);
		}
	}

	down_read(&_origins_lock);

	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}

	up_read(&_origins_lock);

	if (origin_md) {
		if (must_restart_merging)
			start_merge(snap_merging);
		dm_internal_resume_fast(origin_md);
		dm_put(origin_md);
	}

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

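/*
 * Status reporting.  STATUSTYPE_INFO prints either an error state
 * ("Invalid", "Merge failed", "Overflow") or the COW usage as
 * "<sectors_allocated>/<total_sectors> <metadata_sectors>".
 * STATUSTYPE_TABLE reprints the construction parameters, including the
 * optional discard feature arguments.
 */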
static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;
	unsigned num_features;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else if (snap->snapshot_overflowed)
			DMEMIT("Overflow");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		sz += snap->store->type->status(snap->store, type, result + sz,
						maxlen - sz);
		num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
		if (num_features) {
			DMEMIT(" %u", num_features);
			if (snap->discard_zeroes_cow)
				DMEMIT(" discard_zeroes_cow");
			if (snap->discard_passdown_origin)
				DMEMIT(" discard_passdown_origin");
		}
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",snap_origin_name=%s", snap->origin->name);
		DMEMIT(",snap_cow_name=%s", snap->cow->name);
		DMEMIT(",snap_valid=%c", snap->valid ? 'y' : 'n');
		DMEMIT(",snap_merge_failed=%c", snap->merge_failed ? 'y' : 'n');
		DMEMIT(",snapshot_overflowed=%c", snap->snapshot_overflowed ? 'y' : 'n');
		DMEMIT(";");
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

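/*
 * With the "discard_zeroes_cow" feature enabled, advertise a discard
 * granularity and maximum discard size equal to the exception store's
 * chunk size, so every discard the snapshot sees is aligned to, and no
 * larger than, a single chunk.
 */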
static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_snapshot *snap = ti->private;

	if (snap->discard_zeroes_cow) {
		struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

		down_read(&_origins_lock);

		(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
		if (snap_src && snap_dest)
			snap = snap_src;

		/* All discards are split on chunk_size boundary */
		limits->discard_granularity = snap->store->chunk_size;
		limits->max_discard_sectors = snap->store->chunk_size;

		up_read(&_origins_lock);
	}
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *pe2;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	struct dm_exception_table_lock lock;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			continue;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);
		dm_exception_table_lock_init(snap, chunk, &lock);

		down_read(&snap->lock);
		dm_exception_table_lock(&lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			/*
			 * Check exception table to see if block is already
			 * remapped in this snapshot and trigger an exception
			 * if not.
			 */
			e = dm_lookup_exception(&snap->complete, chunk);
			if (e)
				goto next_snapshot;

			dm_exception_table_unlock(&lock);
			pe = alloc_pending_exception(snap);
			dm_exception_table_lock(&lock);

			pe2 = __lookup_pending_exception(snap, chunk);

			if (!pe2) {
				e = dm_lookup_exception(&snap->complete, chunk);
				if (e) {
					free_pending_exception(pe);
					goto next_snapshot;
				}

				pe = __insert_pending_exception(snap, pe, chunk);
				if (!pe) {
					dm_exception_table_unlock(&lock);
					up_read(&snap->lock);

					invalidate_snapshot(snap, -ENOMEM);
					continue;
				}
			} else {
				free_pending_exception(pe);
				pe = pe2;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		dm_exception_table_unlock(&lock);
		up_read(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

again:
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o) {
		if (limit) {
			struct dm_snapshot *s;
			list_for_each_entry(s, &o->snapshots, list)
				if (unlikely(!wait_for_in_progress(s, true)))
					goto again;
		}

		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	}
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
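/*
 * Example (illustrative device name and size): map a 1 GiB origin volume
 * through this target so that writes to it trigger exceptions in all of
 * its snapshots:
 *
 *   dmsetup create base-origin --table "0 2097152 snapshot-origin /dev/vg/base"
 */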
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	o->ti = ti;
	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

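/*
 * I/O to the origin is always a direct linear map.  Writes are first split
 * so they never cross a chunk boundary (split_boundary is the smallest
 * chunk size of any snapshot of this origin) and are then passed to
 * do_origin() so the necessary exception copies are triggered before the
 * write is let through.
 */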
static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio, true);
}

/*
 * Set the origin's write split boundary to the minimum of all the
 * snapshots' chunk sizes, so no write crosses a chunk boundary in any
 * snapshot, and register the origin in the origin hash.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);

	down_write(&_origins_lock);
	__insert_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_postsuspend(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	down_write(&_origins_lock);
	__remove_dm_origin(o);
	up_write(&_origins_lock);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;

	case STATUSTYPE_IMA:
		result[0] = '\0';
		break;
	}
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.postsuspend = origin_postsuspend,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 16, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
	.io_hints = snapshot_io_hints,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	return 0;

bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");