/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
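	/* Must come last: variable-size array, one entry per mirror leg. */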
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

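/*
 * Coalesce wakeups: arm a one-shot timer (HZ / 5, roughly 200ms) so a
 * burst of requeued bios results in a single kmirrord run rather than
 * one wakeup per bio.
 */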
static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

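/*
 * Per-read state allocated in mirror_map(): the leg the read was sent
 * to, plus enough bio detail to restore and reissue the bio on another
 * leg if it fails (see mirror_end_io()).
 */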
#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

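/*
 * Flush every mirror leg by issuing an empty barrier: a zero-sector
 * dm_io with .bi_rw = WRITE_BARRIER acts as a pure cache flush.
 * dm_io() fills error_bits with one bit per leg, so each failed leg
 * can be marked with DM_RAID1_FLUSH_ERROR individually.
 */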
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
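		/* region_size is a power of two, so the mask is a cheap modulo. */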
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
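/*
 * Scan backwards (with wrap-around) from the default mirror and return
 * the first leg without recorded errors, or NULL if every leg has
 * failed.  Note the sector argument is not used for balancing here.
 */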
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
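	/* Empty barrier bios carry no data, so any sector will do. */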
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * If device is suspended, complete the bio.
	 */
	if (atomic_read(&ms->suspend)) {
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	spin_lock_irq(&ms->lock);
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC: 	increment pending, write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
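/*
 * Completion callback for do_write(): 'error' is a dm-io bitset with
 * one bit per mirror leg, letting individual write failures be mapped
 * back to the leg that caused them.
 */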
static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back onto the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
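/*
 * The kmirrord work function: snapshot the queued bio lists under the
 * spinlock, then do recovery, reads, writes and failures outside it.
 */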
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
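/*
 * For example, a full table line might look like this (devices, sizes
 * and region size are illustrative only):
 *
 *   0 2097152 mirror core 2 64 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 */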
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors, and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);

	/*
	 * Now that ms->suspend is set and the workqueue flushed, no more
	 * entries can be added to the ms->holds list, so process it.
	 *
	 * Bios can still arrive concurrently with or after this
	 * presuspend function, but they cannot join the hold list
	 * because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *    A => Alive - No failures
 *    D => Dead - A write failure occurred leaving mirror out-of-sync
 *    S => Sync - A synchronization failure occurred, mirror out-of-sync
 *    R => Read - A read failure occurred, mirror data unaffected
 *    F => Flush - A cache flush to the device failed
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


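/*
 * Example STATUSTYPE_INFO output (values are illustrative):
 *
 *   2 253:0 253:1 125/125 1 AA
 *
 * i.e. #mirrors, each device, regions-in-sync/total regions, and one
 * device_status_char() per leg, followed by the dirty log's status.
 */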
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		      (unsigned long long)log->type->get_sync_count(log),
		      (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");