/*
 * dm-exception-store.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-snap.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
A
Alasdair G Kergon 已提交
17 18
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
L
Linus Torvalds 已提交
19

20
#define DM_MSG_PREFIX "snapshots"
21
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
22

L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106
/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

/*
 * On-disk layout of the COW device header, stored in the first chunk.
 * All fields are little-endian on disk (see the file comment above).
 */
struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid.  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

/*
 * On-disk exception record: maps a chunk of the origin (old_chunk)
 * to its copy in the COW store (new_chunk).  Little-endian on disk;
 * new_chunk == 0 marks the end of the exceptions in an area.
 */
struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

/*
 * Completion callback recorded by persistent_commit() and invoked
 * once the metadata area has been written (or the store invalidated).
 */
struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_snapshot *snap;	/* up pointer to my snapshot */
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it wont hurt to have a
	 * whole chunks worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	/* number of prepared-but-uncommitted exceptions in flight */
	atomic_t pending_count;

	/* callbacks queued for the next metadata-area write */
	uint32_t callback_count;
	struct commit_callback *callbacks;

	/* client for synchronous dm-io on the COW device */
	struct dm_io_client *io_client;

	/* single-threaded queue used to issue metadata I/O */
	struct workqueue_struct *metadata_wq;
};

A
Alasdair G Kergon 已提交
137
static unsigned sectors_to_pages(unsigned sectors)
L
Linus Torvalds 已提交
138
{
139
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
L
Linus Torvalds 已提交
140 141 142 143 144 145 146
}

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

147
	len = ps->snap->chunk_size << SECTOR_SHIFT;
L
Linus Torvalds 已提交
148 149 150 151 152 153 154 155 156

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

157 158 159 160 161 162 163
	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		vfree(ps->area);
		return r;
	}
	memset(ps->zero_area, 0, len);

L
Linus Torvalds 已提交
164 165 166 167 168 169
	return 0;
}

/*
 * Release the metadata buffer and its zero-fill companion.
 * vfree() tolerates NULL, so this is safe to call more than once.
 */
static void free_area(struct pstore *ps)
{
	vfree(ps->zero_area);
	ps->zero_area = NULL;

	vfree(ps->area);
	ps->area = NULL;
}

175
/*
 * Request handed to the metadata workqueue so that a synchronous
 * dm_io() runs from a different thread (see do_metadata()).
 */
struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;		/* dm_io() return code */
};

/*
 * Workqueue handler: perform the synchronous metadata I/O described
 * by the embedding mdata_req and record its result.
 */
static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req;

	req = container_of(work, struct mdata_req, work);
	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

L
Linus Torvalds 已提交
189 190 191
/*
 * Read or write a chunk aligned and sized block of data from a device.
 *
 * Data I/O ('metadata' == 0) is issued directly.  Metadata I/O is
 * bounced through ps->metadata_wq and waited for, so the caller's
 * stack is not re-entered by the block layer.  Returns the dm_io()
 * result.  The buffer is always ps->area.
 */
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * chunk,
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->area,
		.client = ps->io_client,
		.notify.fn = NULL,	/* NULL notify => synchronous dm_io */
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	/* req lives on our stack: wait for the work to finish before return */
	flush_workqueue(ps->metadata_wq);

	return req.result;
}

225 226 227 228 229 230 231 232
/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	/* each area occupies one metadata chunk plus its data chunks */
	chunk_t chunks_per_group = ps->exceptions_per_area + 1;

	/* chunk 0 holds the header, hence the offset of 1 */
	return 1 + chunks_per_group * area;
}

L
Linus Torvalds 已提交
233 234 235 236
/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
237
static int area_io(struct pstore *ps, int rw)
L
Linus Torvalds 已提交
238 239
{
	int r;
240
	chunk_t chunk;
L
Linus Torvalds 已提交
241

242
	chunk = area_location(ps, ps->current_area);
L
Linus Torvalds 已提交
243

244
	r = chunk_io(ps, chunk, rw, 0);
L
Linus Torvalds 已提交
245 246 247 248 249 250
	if (r)
		return r;

	return 0;
}

251
static void zero_memory_area(struct pstore *ps)
L
Linus Torvalds 已提交
252
{
253
	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271
}

/*
 * Write a chunk of zeros over metadata area 'area' on the COW
 * device, using the preallocated ps->zero_area buffer.  Returns the
 * dm_io() result.
 */
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * area_location(ps, area),
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->zero_area,
		.client = ps->io_client,
		.notify.fn = NULL,	/* synchronous */
	};

	return dm_io(&io_req, 1, &where, NULL);
}

/*
 * Read the on-disk header (chunk 0) and reconcile the table-supplied
 * chunk size with the one recorded in the metadata.
 *
 * Sets *new_snapshot to 1 if the device is blank (magic == 0) and a
 * fresh store must be initialised, 0 otherwise.  Side effects: may
 * rewrite snap->chunk_size/mask/shift, and allocates ps->io_client
 * and the area buffers.  Returns 0 on success or a negative errno.
 */
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use default chunk size (or hardsect_size, if larger) if none supplied
	 */
	if (!ps->snap->chunk_size) {
        	ps->snap->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_hardsect_size(ps->snap->cow->bdev) >> 9);
		ps->snap->chunk_mask = ps->snap->chunk_size - 1;
		/* NOTE(review): mask/shift assume a power-of-two chunk_size */
		ps->snap->chunk_shift = ffs(ps->snap->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	/* read chunk 0 (the header) into ps->area */
	r = chunk_io(ps, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	/* magic == 0 means a blanked device: this is a new snapshot */
	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->snap->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->snap->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	/* adopt the on-disk chunk size and rebuild the derived values */
	ps->snap->chunk_size = chunk_size;
	ps->snap->chunk_mask = chunk_size - 1;
	ps->snap->chunk_shift = ffs(chunk_size) - 1;

	/* resize the dm-io client and reallocate the buffers to match */
	r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

/*
 * Write the in-core header fields (magic, valid, version, chunk_size)
 * out to chunk 0 of the COW device.  Clobbers ps->area.  Returns the
 * chunk_io() result.
 */
static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);

	return chunk_io(ps, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	/* ps->area holds the current metadata area as an array of records */
	struct disk_exception *exceptions = ps->area;

	BUG_ON(index >= ps->exceptions_per_area);

	return exceptions + index;
}

376 377
static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *de = get_exception(ps, index);

	/* copy out of the area, converting from little-endian */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

386 387
static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *slot = get_exception(ps, index);

	/* copy into the area, converting to little-endian */
	slot->old_chunk = cpu_to_le64(de->old_chunk);
	slot->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 *
 * Also advances ps->next_free past the highest COW chunk seen, and
 * records ps->current_committed when the terminating record is hit.
 * Returns 0 or the dm_add_exception() error.
 */
static int insert_exceptions(struct pstore *ps, int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

/*
 * Load every committed exception from disk into the snapshot table.
 * Leaves ps->current_area pointing at the last (partially full) area.
 * Returns 0 or the first I/O / insertion error.
 */
static int read_exceptions(struct pstore *ps)
{
	int r, full = 1;

	/*
	 * Keeping reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, &full);
		if (r)
			return r;
	}

	/* step back onto the partially full area the loop overshot */
	ps->current_area--;

	return 0;
}

A
Alasdair G Kergon 已提交
465
static struct pstore *get_info(struct exception_store *store)
L
Linus Torvalds 已提交
466 467 468 469 470 471 472 473 474 475 476 477 478 479 480
{
	return (struct pstore *) store->context;
}

/*
 * Report COW device usage as a fraction: sectors consumed by
 * exceptions so far over the total device size.
 */
static void persistent_fraction_full(struct exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->snap->chunk_size;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

/*
 * Tear down a persistent store: stop the metadata workqueue, release
 * the dm-io client, and free the callback array, areas and pstore.
 */
static void persistent_destroy(struct exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);
	dm_io_client_destroy(ps->io_client);
	vfree(ps->callbacks);
	free_area(ps);
	kfree(ps);
}

/*
 * Read (or initialise) the on-disk metadata of a persistent store.
 * Returns 0 on success, 1 if the metadata is readable but the
 * snapshot has been marked invalid, or a negative errno.
 */
static int persistent_read_metadata(struct exception_store *store)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
			sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot ?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		/* start with a clean first metadata area, in core and on disk */
		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Metadata are valid, but snapshot is invalidated
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps);
		if (r)
			return r;
	}

	return 0;
}

/*
 * Reserve the next free COW chunk for exception 'e', stepping over
 * chunks that are reserved for metadata areas.  Bumps the in-flight
 * count; persistent_commit() must follow.  Returns -ENOSPC when the
 * COW device is full.
 */
static int persistent_prepare(struct exception_store *store,
			      struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	/* Is there enough room ? */
	if (size < ((ps->next_free + 1) * store->snap->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	/* offset 1 within a stride is a metadata chunk: skip it */
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

/*
 * Record a prepared exception in the current metadata area and queue
 * the caller's callback.  The area is only flushed to disk once all
 * in-flight exceptions have drained or the area fills up; callbacks
 * are then invoked with the store's validity as the success flag.
 */
static void persistent_commit(struct exception_store *store,
			      struct dm_snap_exception *e,
			      void (*callback) (void *, int success),
			      void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	     zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	/* fire every queued callback; ps->valid reports write success */
	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

/*
 * Permanently invalidate the snapshot: mark the in-core state invalid
 * and push that through to the on-disk header (failure only logged).
 */
static void persistent_drop(struct exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

652
/*
 * Construct a persistent exception store: allocate and initialise
 * the pstore, start the metadata workqueue and install the store's
 * method pointers.  Returns 0 or -ENOMEM.
 */
int dm_create_persistent(struct exception_store *store)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->snap = store->snap;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	/* callbacks are sized in persistent_read_metadata() */
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->destroy = persistent_destroy;
	store->read_metadata = persistent_read_metadata;
	store->prepare_exception = persistent_prepare;
	store->commit_exception = persistent_commit;
	store->drop_snapshot = persistent_drop;
	store->fraction_full = persistent_fraction_full;
	store->context = ps;

	return 0;
}

/*-----------------------------------------------------------------
 * Implementation of the store for non-persistent snapshots.
 *---------------------------------------------------------------*/
/* State for a non-persistent store: a simple bump allocator. */
struct transient_c {
	sector_t next_free;	/* next unused sector on the COW device */
};

/* Free the transient store's context (a kmalloc'd transient_c). */
static void transient_destroy(struct exception_store *store)
{
	struct transient_c *tc = store->context;

	kfree(tc);
}

/* Transient snapshots keep no on-disk metadata: nothing to read. */
static int transient_read_metadata(struct exception_store *store)
{
	return 0;
}

A
Alasdair G Kergon 已提交
707 708
/*
 * Bump-allocate the next chunk of the COW device for exception 'e'.
 * Returns -1 when the device is exhausted.
 */
static int transient_prepare(struct exception_store *store,
			     struct dm_snap_exception *e)
{
	struct transient_c *tc = (struct transient_c *) store->context;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	if (size < (tc->next_free + store->snap->chunk_size))
		return -1;

	e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
	tc->next_free += store->snap->chunk_size;

	return 0;
}

/*
 * No metadata to persist for a transient store: complete at once.
 */
static void transient_commit(struct exception_store *store,
			     struct dm_snap_exception *e,
			     void (*callback) (void *, int success),
			     void *callback_context)
{
	/* Just succeed */
	callback(callback_context, 1);
}

/*
 * Report usage: sectors handed out so far over the COW device size.
 */
static void transient_fraction_full(struct exception_store *store,
				    sector_t *numerator, sector_t *denominator)
{
	*numerator = ((struct transient_c *) store->context)->next_free;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

738
int dm_create_transient(struct exception_store *store)
L
Linus Torvalds 已提交
739 740 741 742 743 744 745
{
	struct transient_c *tc;

	store->destroy = transient_destroy;
	store->read_metadata = transient_read_metadata;
	store->prepare_exception = transient_prepare;
	store->commit_exception = transient_commit;
746
	store->drop_snapshot = NULL;
L
Linus Torvalds 已提交
747 748 749 750 751 752 753 754 755 756 757
	store->fraction_full = transient_fraction_full;

	tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	tc->next_free = 0;
	store->context = tc;

	return 0;
}