dm-snap-persistent.c
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
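
/*
 * Illustrative layout, assuming the default 32-sector (16KB) chunk
 * size: each 16-byte struct disk_exception packs 1024 entries into a
 * metadata chunk, so the COW device looks like
 *
 *	chunk 0		header
 *	chunk 1		metadata area 0
 *	chunks 2-1025	exception data for area 0
 *	chunk 1026	metadata area 1
 *	... and so on.
 */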

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */

	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
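	/* e.g. (illustrative) 32 sectors (16KB) is 4 pages with 4KB PAGE_SIZE */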
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;
	memset(ps->zero_area, 0, len);

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;

	if (ps->header_area)
		vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ON_STACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
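 *
 * Illustrative arithmetic: with 1024 exceptions per area, each area
 * spans 1025 chunks (one metadata chunk plus its data chunks), so
 * areas 0, 1, 2, ... begin at chunks 1, 1026, 2051, ...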
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, ps->area, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *e = get_exception(ps, index);

	/* clear it */
	e->old_chunk = 0;
	e->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
			sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but snapshot is invalidated
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
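	 *
	 * Chunks whose index is 1 modulo (exceptions_per_area + 1) hold
	 * metadata, which is what the sector_div() test below skips over.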
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	     zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_BARRIER))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	int nr_consecutive;
	int r;

	/*
	 * When current area is empty, move back to preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->current_committed - 1, &de);
	*last_old_chunk = de.old_chunk;
	*last_new_chunk = de.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->current_committed - 1 - nr_consecutive,
			       &de);
		if (de.old_chunk != *last_old_chunk - nr_consecutive ||
		    de.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}
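
/*
 * The " P <chunk_size>" emitted above mirrors the snapshot target's
 * table line.  Illustrative usage (device names and sizes are
 * examples only):
 *
 *	dmsetup create snap --table \
 *	    "0 8388608 snapshot /dev/vg/base /dev/vg/cow P 32"
 *
 * where P selects this persistent store and 32 is the chunk size in
 * 512-byte sectors.
 */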

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}