/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

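/*
 * Wake the per-mirror-set worker (kmirrord) to process queued bios,
 * failures and recovery work.
 */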
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

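/*
 * Region hash callback: wake anyone (see mirror_presuspend) waiting
 * for in-flight recovery to drain.
 */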
static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

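/*
 * Add a bio to the read or write list and wake kmirrord, but only
 * when the list was previously empty (a wakeup is already pending
 * otherwise).
 */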
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

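/*
 * Region hash callback: writes that were delayed while their region
 * was recovering are handed back here and requeued.
 */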
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
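/*
 * Per-read context: the mirror that serviced the read plus saved bio
 * details, so a failed read can be restored and retried on another leg.
 */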
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

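	/* One zero-length region per leg: dm_io then issues just the flush. */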
	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
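/*
 * choose_mirror
 *
 * Scan backwards from the default mirror and return the first leg with
 * a zero error count, or NULL if every leg has failed.
 */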
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms,  bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
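	/* A flush bio has no payload (bi_size == 0), so any sector will do. */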
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC: 	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/


static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

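/*
 * Parse the optional feature arguments: <#features> [handle_errors].
 */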
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work_sync(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & REQ_FLUSH))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
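			/*
			 * The bio was requeued above; return 1 (DM_ENDIO_INCOMPLETE)
			 * so the core does not complete it now.
			 */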
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list to start recovery waiting
	 * for bios in the hold list. After the process, no bio has
	 * a chance to be added in the hold list because ms->suspend
	 * is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    F => Flush - A flush failure occurred
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		      (unsigned long long)log->type->get_sync_count(log),
		      (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");