/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

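/*
 * The delayed_wake() machinery below batches wakeups of the worker thread:
 * rather than poking kmirrord for every requeued bio, it arms a one-shot
 * timer for jiffies + HZ / 5 and lets delayed_wake_fn() issue a single
 * wakeup_mirrord() when it fires.
 */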
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

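/*
 * A dm_raid1_read_record remembers which mirror a read was sent to and a
 * snapshot of the bio (dm_bio_details), so that mirror_end_io() can restore
 * the bio and requeue it against another in-sync leg if the read fails.
 */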
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

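/*
 * mirror_flush() is registered as the dirty log's flush callback (see
 * create_dirty_log() below): it issues an empty barrier write to every leg
 * and fails any leg whose bit comes back set in error_bits.
 */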
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_WRITE_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
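/*
 * In outline: do_recovery() asks the region hash to quiesce dirty regions,
 * recover() builds one kcopyd source (the default mirror) plus one
 * destination per remaining leg for each quiesced region, and
 * recovery_complete() marks the region recovered or fails the offending
 * mirrors once the copy finishes.
 */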
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC: 	increment pending, use dm_io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/


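/*
 * write_callback() below runs from interrupt context.  When a leg fails and
 * errors are being handled, the bio is parked on ms->failures and handed to
 * the worker thread, because the follow-up work (marking the region
 * out-of-sync, raising a dm event) may block.
 */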
static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
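/*
 * do_mirror() is the kmirrord work function: it snapshots the queued reads,
 * writes and failures under ms->lock, lets the region hash update its
 * state, and then processes recovery, reads, writes and failures in that
 * order.
 */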
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
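/*
 * Illustrative only (device names, sizes and region size are made up): a
 * table line as a tool such as dmsetup might pass down for this target
 * could look like
 *
 *   0 2097152 mirror core 2 1024 nosync 2 /dev/sdb1 0 /dev/sdc1 0 1 handle_errors
 *
 * i.e. a "core" log taking two parameters (region size 1024 plus "nosync"),
 * two mirror legs each starting at offset 0, and the optional
 * "handle_errors" feature.
 */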
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


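/*
 * For orientation, the STATUSTYPE_INFO output assembled below has the shape
 * (values here are hypothetical):
 *
 *   2 253:4 253:5 250/250 1 AA 3 disk 253:3 A
 *
 * i.e. #mirrors, the mirror devices, sync_count/nr_regions, a literal 1,
 * one device_status_char() per leg, then the dirty log's own status.
 */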
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		      (unsigned long long)log->type->get_sync_count(log),
		      (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");