/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define	NR_RAID1_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
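
/*
 * Illustrative sketch (not part of the original driver): IO_BLOCKED and
 * IO_MADE_GOOD are small integers cast to pointers, so a bios[] slot must
 * be screened with BIO_SPECIAL() before it is dereferenced or passed to
 * bio_put().  Hypothetical helper, for exposition only.
 */
static inline int r1bio_slot_is_real_bio(struct bio *bio)
{
	/* NULL and the two sentinel values mean "no real bio here" */
	return bio != NULL && !BIO_SPECIAL(bio);
}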

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf);
static void lower_barrier(struct r1conf *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
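
/*
 * Worked example (illustrative): with 4 KiB pages, the 64 KiB
 * RESYNC_BLOCK_SIZE above gives RESYNC_SECTORS = 65536 >> 9 = 128 sectors
 * and RESYNC_PAGES = 16 pages per resync bio, while RESYNC_WINDOW is 2 MiB.
 */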

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	struct r1bio *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while (j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			bio->bi_vcnt = i+1;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (j=0 ; j < pi->raid_disks; j++)
		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i, j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_sector,
			 (unsigned long long) bio->bi_sector +
			 (bio->bi_size >> 9) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	mirror = find_bio_disk(r1_bio, bio);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_sector,
					 (unsigned long long) mbio->bi_sector +
					 (mbio->bi_size >> 9) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;

	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Unmerged, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad < this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If the buffered sequential IO size exceeds the
			 * optimal iosize, check whether there is an idle
			 * disk. If so, choose that idle disk. read_balance
			 * could already have chosen an idle disk before
			 * noticing that this is sequential IO on the current
			 * disk. That doesn't matter: the current disk will go
			 * idle and will be used again once the first disk's
			 * IO size exceeds the optimal iosize. This way the
			 * first disk's iosize is at least the optimal iosize.
			 * The second disk's iosize might be small, but that
			 * is not a big deal, since when the second disk
			 * starts IO the first disk is likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}
		/* If device is idle, use it */
		if (pending == 0) {
			best_disk = disk;
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with the fewest pending requests
	 * even if that disk is rotational, which may or may not be optimal
	 * for raids with mixed rotational/non-rotational disks, depending
	 * on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;
	return best_disk;
}
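
/*
 * Illustrative sketch (not part of the driver): the distance heuristic at
 * the core of read_balance() above, reduced to its simplest form.  The
 * real code also weighs bad blocks, pending IO, write-mostly devices and
 * rotational state.  Hypothetical helper, for exposition only.
 */
static inline int example_closest_disk(struct r1conf *conf, sector_t sector)
{
	int disk, best = -1;
	sector_t best_dist = MaxSector;

	for (disk = 0; disk < conf->raid_disks; disk++) {
		sector_t dist = abs(sector - conf->mirrors[disk].head_position);
		if (dist < best_dist) {
			best_dist = dist;
			best = disk;
		}
	}
	return best;
}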

static int raid1_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		int disk;
		rcu_read_lock();
		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = sector +
						rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;

}

int md_raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);

static int raid1_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid1_congested(mddev, bits);
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
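
/*
 * Usage sketch (illustrative, not part of the driver): a background task
 * brackets each unit of resync work with the barrier calls above, per the
 * rules described before RESYNC_DEPTH.  Hypothetical helper, for
 * exposition only.
 */
static inline void example_background_pass(struct r1conf *conf)
{
	raise_barrier(conf);	/* drain normal IO, block new submissions */
	/* ... perform one resync/recovery chunk here ... */
	lower_barrier(conf);	/* let queued normal IO proceed again */
}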

static void wait_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r1conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r1conf *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending match nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
896 897 898 899
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
900 901 902 903
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+1,
				conf->resync_lock,
				flush_pending_writes(conf));
904 905
	spin_unlock_irq(&conf->resync_lock);
}
906
static void unfreeze_array(struct r1conf *conf)
907 908 909 910 911 912 913 914 915
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O 
 */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

static void make_request(struct mddev *mddev, struct bio * bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int sectors_handled;
	int max_sectors;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
	    bio->bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
			    bio->bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
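
	/*
	 * Worked example (illustrative): a bio split into three r1_bios
	 * leaves bi_phys_segments == 3; call_bio_endio() decrements it
	 * once per completed r1_bio, and only the final decrement to
	 * zero completes the master bio.
	 */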

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
			    max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */

			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_sector + sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */
	disks = conf->raid_disks * 2;
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0;  i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)
		    || test_bit(Unmerged, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged*/
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
1202
				rdev_dec_pending(rdev, mddev);
1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
1214
			}
1215 1216 1217 1218 1219 1220 1221
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating another r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait))
				alloc_behind_pages(mbio, r1_bio);

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}
		if (r1_bio->behind_bvecs) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_sector	= (r1_bio->sector +
				   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw =
			WRITE | do_flush_fua | do_sync | do_discard | do_same;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (!plug)
			md_wakeup_thread(mddev->thread);
	}
	/* Mustn't call r1_bio_write_done before this next test,
	 * as it could result in the bio being freed.
	 */
	if (sectors_handled < (bio->bi_size >> 9)) {
		r1_bio_write_done(r1_bio);
		/* We need another r1_bio.  It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
		r1_bio->master_bio = bio;
		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_sector + sectors_handled;
		goto retry_write;
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disks, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration 
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}


static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);
	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors+mirror;
		if (!p->rdev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
		/* Some requests might not have seen this new
		 * merge_bvec_fn.  We must wait for them to complete
		 * before merging the device fully.
		 * First we make sure any code which has tested
		 * our function has submitted the request, then
		 * we wait for all outstanding requests to complete.
		 */
		synchronize_sched();
		raise_barrier(conf);
		lower_barrier(conf);
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
	if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}

static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			raise_barrier(conf);
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			lower_barrier(conf);
			clear_bit(WantReplacement, &rdev->flags);
		} else
			clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}


static void end_sync_read(struct bio *bio, int error)
{
	struct r1bio *r1_bio = bio->bi_private;
	update_head_pos(r1_bio->read_disk, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int mirror = 0;
	sector_t first_bad;
	int bad_sectors;

	mirror = find_bio_disk(r1_bio, bio);

	if (!uptodate) {
N
NeilBrown 已提交
1654
		sector_t sync_blocks = 0;
1655 1656 1657 1658
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits doesn't get cleared. */
		do {
1659
			bitmap_end_sync(mddev->bitmap, s,
1660 1661 1662 1663
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
1664 1665
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
1666 1667 1668 1669
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				mddev->recovery);
1670
		set_bit(R1BIO_WriteError, &r1_bio->state);
1671 1672 1673
	} else if (is_badblock(conf->mirrors[mirror].rdev,
			       r1_bio->sector,
			       r1_bio->sectors,
1674 1675 1676 1677 1678 1679
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors)
		)
1680
		set_bit(R1BIO_MadeGood, &r1_bio->state);
1681

L
Linus Torvalds 已提交
1682
	if (atomic_dec_and_test(&r1_bio->remaining)) {
1683
		int s = r1_bio->sectors;
1684 1685
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
1686 1687 1688 1689 1690
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
L
Linus Torvalds 已提交
1691 1692 1693
	}
}

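/*
 * Synchronously read or write one range of sectors on rdev, recording
 * a bad block (or failing the device) if the I/O does not succeed.
 * Returns 1 on success, 0 on failure.
 */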
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			    int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

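/*
 * Salvage a failed resync read, page by page, from any other mirror
 * that still has the data; give up and abort the recovery only when
 * no device at all can supply a block.
 */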
static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;

	while(sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		struct md_rdev *rdev;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here: devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 bio->bi_io_vec[idx].bv_page,
						 READ, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
			       " for block %llu\n",
			       mdname(mddev),
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	return 1;
}

static int process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		int size;

		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
			continue;

		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
			for (j = vcnt; j-- ; ) {
				struct page *p, *s;
				p = pbio->bi_io_vec[j].bv_page;
				s = sbio->bi_io_vec[j].bv_page;
				if (memcmp(page_address(p),
					   page_address(s),
					   sbio->bi_io_vec[j].bv_len))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}
		/* fixup the bio for reuse */
		sbio->bi_vcnt = vcnt;
		sbio->bi_size = r1_bio->sectors << 9;
		sbio->bi_idx = 0;
		sbio->bi_phys_segments = 0;
		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		sbio->bi_flags |= 1 << BIO_UPTODATE;
		sbio->bi_next = NULL;
		sbio->bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		size = sbio->bi_size;
		for (j = 0; j < vcnt ; j++) {
			struct bio_vec *bi;
			bi = &sbio->bi_io_vec[j];
			bi->bv_offset = 0;
			if (size > PAGE_SIZE)
				bi->bv_len = PAGE_SIZE;
			else
				bi->bv_len = size;
			size -= PAGE_SIZE;
			memcpy(page_address(bi->bv_page),
			       page_address(pbio->bi_io_vec[j].bv_page),
			       PAGE_SIZE);
		}
	}
	return 0;
}

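/*
 * Called from raid1d once all resync reads for an r1bio have finished:
 * repair any read failure, optionally compare copies (check/repair),
 * then schedule the writes that bring the mirrors back in sync.
 */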
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		if (process_checks(r1_bio) < 0)
			return;
	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while(sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			sector_t first_bad;
			int bad_sectors;

			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0 &&
			    sync_page_io(rdev, sect, s<<9,
					 conf->tmppage, READ, false))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks * 2)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags))
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "md/raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
					           rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}

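/*
 * Local helpers: submit a bio and sleep until it completes.  Used by
 * narrow_write_error() below for its small synchronous retry writes.
 */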
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

static int submit_bio_wait(int rw, struct bio *bio)
{
	struct completion event;
	rw |= REQ_SYNC;

	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	return test_bit(BIO_UPTODATE, &bio->bi_flags);
}

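/*
 * Retry a failed write in badblock-aligned pieces so that only the
 * sectors that really fail get recorded as bad, instead of kicking
 * the whole device out of the array.
 */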
static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;
	int vcnt, idx;
	struct bio_vec *vec;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = 1 << rdev->badblocks.shift;
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		vcnt = r1_bio->behind_page_count;
		vec = r1_bio->behind_bvecs;
		idx = 0;
		while (vec[idx].bv_page == NULL)
			idx++;
	} else {
		vcnt = r1_bio->master_bio->bi_vcnt;
		vec = r1_bio->master_bio->bi_io_vec;
		idx = r1_bio->master_bio->bi_idx;
	}
	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors'*/

		wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
		memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
		wbio->bi_sector = r1_bio->sector;
		wbio->bi_rw = WRITE;
		wbio->bi_vcnt = vcnt;
		wbio->bi_size = r1_bio->sectors << 9;
		wbio->bi_idx = idx;

		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_sector += rdev->data_offset;
		wbio->bi_bdev = rdev->bdev;
		if (submit_bio_wait(WRITE, wbio) == 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

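/*
 * Final accounting for a resync r1bio: clear bad blocks that were
 * successfully over-written and record new ones where writes failed.
 */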
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

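/*
 * Final accounting for a normal write r1bio: clear bad blocks marked
 * IO_MADE_GOOD and narrow down any write errors that occurred.
 */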
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		close_write(r1_bio);
	raid_end_bio_io(r1_bio);
}

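/*
 * Called from raid1d for an r1bio that failed its read: repair the
 * bad sectors in place if the array is writable, then redirect the
 * request to another mirror via read_balance().
 */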
static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	int disk;
	int max_sectors;
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write it
	 * and check whether that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */
	if (mddev->ro == 0) {
		freeze_array(conf);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);

	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
read_more:
	disk = read_balance(conf, r1_bio, &max_sectors);
	if (disk == -1) {
		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
		raid_end_bio_io(r1_bio);
	} else {
		const unsigned long do_sync
			= r1_bio->master_bio->bi_rw & REQ_SYNC;
		if (bio) {
			r1_bio->bios[r1_bio->read_disk] =
				mddev->ro ? IO_BLOCKED : NULL;
			bio_put(bio);
		}
		r1_bio->read_disk = disk;
		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
		r1_bio->bios[r1_bio->read_disk] = bio;
		rdev = conf->mirrors[disk].rdev;
		printk_ratelimited(KERN_ERR
				   "md/raid1:%s: redirecting sector %llu"
				   " to other mirror: %s\n",
				   mdname(mddev),
				   (unsigned long long)r1_bio->sector,
				   bdevname(rdev->bdev, b));
		bio->bi_sector = r1_bio->sector + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_end_io = raid1_end_read_request;
		bio->bi_rw = READ | do_sync;
		bio->bi_private = r1_bio;
		if (max_sectors < r1_bio->sectors) {
			/* Drat - have to split this up more */
			struct bio *mbio = r1_bio->master_bio;
			int sectors_handled = (r1_bio->sector + max_sectors
					       - mbio->bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (mbio->bi_phys_segments == 0)
				mbio->bi_phys_segments = 2;
			else
				mbio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			generic_make_request(bio);
			bio = NULL;

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = mbio;
			r1_bio->sectors = (mbio->bi_size >> 9)
					  - sectors_handled;
			r1_bio->state = 0;
			set_bit(R1BIO_ReadError, &r1_bio->state);
			r1_bio->mddev = mddev;
			r1_bio->sector = mbio->bi_sector + sectors_handled;

			goto read_more;
		} else
			generic_make_request(bio);
	}
}

static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from separate
			 * context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}


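/* Allocate the resync buffer pool; called once, on the first sync request. */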
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	raise_barrier(conf);

	conf->next_resync = sector_nr;

	rcu_read_lock();
	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags &= ~(BIO_POOL_MASK-1);
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_rw = READ;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_rw = READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio->bi_rw = WRITE;
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			bio->bi_private = r1_bio;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;

	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io == NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);
	}
	return nr_sectors;
}

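/* raid1 does not stripe, so the array size is just the device size. */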
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}

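/*
 * Allocate and populate the per-array r1conf: the mirror table (with
 * replacement slots in the upper half), the r1bio mempool, and the
 * raid1d thread, which is handed over to the mddev in run().
 */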
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct raid1_info)
				* mddev->raid_disks * 2,
				 GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		struct request_queue *q;
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		q = bdev_get_queue(rdev->bdev);
		if (q->merge_bvec_fn)
			mddev->merge_check_needed = 1;

		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid1:%s: couldn't allocate thread\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static int stop(struct mddev *mddev);
static int run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
		       mdname(mddev));
		return -EIO;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue)
		blk_queue_max_write_same_sectors(mddev->queue,
						 mddev->chunk_sectors);
	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid1:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);

		if (discard_supported)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
						  mddev->queue);
	}

	ret = md_integrity_register(mddev);
	if (ret)
		stop(mddev);
	return ret;
}
2882
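/* Shut the array down: drain behind-writes, stop raid1d, free r1conf. */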
static int stop(struct mddev *mddev)
L
Linus Torvalds 已提交
2883
{
2884
	struct r1conf *conf = mddev->private;
2885 2886 2887
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
2888
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
N
NeilBrown 已提交
2889 2890
		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
		       mdname(mddev));
2891
		/* need to kick something here to make sure I/O goes? */
2892 2893
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
2894
	}
L
Linus Torvalds 已提交
2895

2896 2897 2898
	raise_barrier(conf);
	lower_barrier(conf);

2899
	md_unregister_thread(&mddev->thread);
L
Linus Torvalds 已提交
2900 2901
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
2902 2903
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
L
Linus Torvalds 已提交
2904 2905 2906 2907 2908
	kfree(conf);
	mddev->private = NULL;
	return 0;
}


static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING
				       "md/raid1:%s: cannot register rd%d\n",
				       mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

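/*
 * Quiesce callback: state 1 raises the resync barrier to block new I/O,
 * state 0 lowers it again, and state 2 wakes barrier waiters so a
 * suspended array can make progress.
 */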
static void raid1_quiesce(struct mddev *mddev, int state)
{
	struct r1conf *conf = mddev->private;

	switch(state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf))
			conf->barrier = 1;
		return conf;
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);