/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define	NR_RAID1_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
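
/*
 * Illustrative sketch (not a snippet from this file): IO_BLOCKED and
 * IO_MADE_GOOD are sentinel values rather than real pointers, so any
 * code walking a 'bios' array must filter slots with BIO_SPECIAL()
 * before dereferencing or dropping them:
 *
 *	struct bio *b = r1_bio->bios[i];
 *	if (!BIO_SPECIAL(b))
 *		bio_put(b);	(NULL also tests as special, so it is skipped)
 *
 * put_all_bios() below follows exactly this pattern.
 */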

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
			  sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
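
/*
 * Worked numbers for the defaults above (illustrative, assuming 512-byte
 * sectors): RESYNC_BLOCK_SIZE = 64 KiB gives RESYNC_SECTORS = 128;
 * RESYNC_WINDOW = 64 KiB * 32 = 2 MiB, so RESYNC_WINDOW_SECTORS = 4096;
 * and NEXT_NORMALIO_DISTANCE = 3 * 4096 = 12288 sectors (6 MiB).
 */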

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int need_pages;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		need_pages = pi->raid_disks;
	else
		need_pages = 1;
	for (j = 0; j < need_pages; j++) {
		bio = r1_bio->bios[j];
		bio->bi_vcnt = RESYNC_PAGES;

		if (bio_alloc_pages(bio, gfp_flags))
			goto out_free_pages;
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	while (--j >= 0) {
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, r1_bio->bios[j], i)
			__free_page(bv->bv_page);
	}

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t start_next_window = r1_bio->start_next_window;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * make_request() might be waiting for
		 * bi_phys_segments to decrease
		 */
		wake_up(&conf->wait_barrier);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf, start_next_window, bi_sector);
	}
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	mirror = find_bio_disk(r1_bio, bio);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
		    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;

	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
	    (mddev_is_clustered(conf->mddev) &&
	    md_cluster_ops->area_resyncing(conf->mddev, this_sector,
		    this_sector + sectors)))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Unmerged, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad < this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds optimal
			 * iosize, check if there is idle disk. If yes, choose
			 * the idle disk. read_balance could already choose an
			 * idle disk before noticing it's a sequential IO in
			 * this disk. This doesn't matter because this disk
			 * will idle, and next time it will be utilized after
			 * the first disk's IO size exceeds the optimal
			 * iosize. In this way, iosize of the first disk will
			 * be optimal iosize at least. iosize of the second
			 * disk might be small, but not a big deal since when
			 * the second disk starts IO, the first disk is likely
			 * still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}
		/* If device is idle, use it */
		if (pending == 0) {
			best_disk = disk;
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk is
	 * non-rotational, choose the disk with fewer pending requests even if
	 * that disk is rotational, which might/might not be optimal for raids
	 * with mixed rotational/non-rotational disks depending on workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
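
/*
 * Caller sketch (illustrative only, mirroring how make_request() below
 * uses read_balance()):
 *
 *	int max_sectors;
 *	int disk = read_balance(conf, r1_bio, &max_sectors);
 *	if (disk < 0)
 *		... no readable device, fail the r1_bio ...
 *
 * On success the chosen rdev has nr_pending elevated; the caller must
 * eventually drop that reference with rdev_dec_pending().
 */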

static int raid1_mergeable_bvec(struct mddev *mddev,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		int disk;
		rcu_read_lock();
		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = sector +
						rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;

}

static int raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;
	conf->next_resync = sector_nr;

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
	 *    the max count which is allowed.
	 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
	 *    next resync will reach the window which normal bios are
	 *    handling.
	 * D: while there are any active requests in the current window.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    conf->barrier < RESYNC_DEPTH &&
			    conf->current_window_requests == 0 &&
			    (conf->start_next_window >=
			     conf->next_resync + RESYNC_SECTORS),
			    conf->resync_lock);

	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
	bool wait = false;

	if (conf->array_frozen || !bio)
		wait = true;
	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
		if ((conf->mddev->curr_resync_completed
		     >= bio_end_sector(bio)) ||
		    (conf->next_resync + NEXT_NORMALIO_DISTANCE
		     <= bio->bi_iter.bi_sector))
			wait = false;
		else
			wait = true;
	}

	return wait;
}

static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
{
	sector_t sector = 0;

	spin_lock_irq(&conf->resync_lock);
	if (need_to_wait_for_sync(conf, bio)) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * per-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to allow conf->start_next_window
		 * to increase.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->array_frozen &&
				    (!conf->barrier ||
				     ((conf->start_next_window <
				       conf->next_resync + RESYNC_SECTORS) &&
				      current->bio_list &&
				      !bio_list_empty(current->bio_list))),
				    conf->resync_lock);
		conf->nr_waiting--;
	}

	if (bio && bio_data_dir(bio) == WRITE) {
		if (bio->bi_iter.bi_sector >=
		    conf->mddev->curr_resync_completed) {
			if (conf->start_next_window == MaxSector)
				conf->start_next_window =
					conf->next_resync +
					NEXT_NORMALIO_DISTANCE;

			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
			    <= bio->bi_iter.bi_sector)
				conf->next_window_requests++;
			else
				conf->current_window_requests++;
			sector = conf->start_next_window;
		}
	}

	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
	return sector;
}

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
			  sector_t bi_sector)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	if (start_next_window) {
		if (start_next_window == conf->start_next_window) {
			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
			    <= bi_sector)
				conf->next_window_requests--;
			else
				conf->current_window_requests--;
		} else
			conf->current_window_requests--;

		if (!conf->current_window_requests) {
			if (conf->next_window_requests) {
				conf->current_window_requests =
					conf->next_window_requests;
				conf->next_window_requests = 0;
				conf->start_next_window +=
					NEXT_NORMALIO_DISTANCE;
			} else
				conf->start_next_window = MaxSector;
		}
	}
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
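
/*
 * Pairing sketch (illustrative only): the helpers above must be used in
 * matched pairs.  Regular I/O brackets each request with
 * wait_barrier()/allow_barrier(), keeping the window cookie returned by
 * wait_barrier():
 *
 *	sector_t window = wait_barrier(conf, bio);
 *	... issue the normal I/O ...
 *	allow_barrier(conf, window, bio->bi_iter.bi_sector);
 *
 * while resync/recovery brackets each window with raise_barrier() and
 * lower_barrier().
 */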

static void freeze_array(struct r1conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

/* duplicate the data pages for behind I/O
 */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment_all(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}

static void make_request(struct mddev *mddev, struct bio * bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int sectors_handled;
	int max_sectors;
	sector_t start_next_window;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    ((bio_end_sector(bio) > mddev->suspend_lo &&
	    bio->bi_iter.bi_sector < mddev->suspend_hi) ||
	    (mddev_is_clustered(mddev) &&
	     md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio_end_sector(bio) <= mddev->suspend_lo ||
			    bio->bi_iter.bi_sector >= mddev->suspend_hi ||
			    (mddev_is_clustered(mddev) &&
			     !md_cluster_ops->area_resyncing(mddev,
				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	start_next_window = wait_barrier(conf, bio);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;
		r1_bio->start_next_window = 0;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_iter.bi_sector = r1_bio->sector +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */

			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device on
	 * which we have seen a write error, we want to avoid writing those
	 * blocks.
	 * This potentially requires several writes to write around
	 * the bad blocks.  Each set of writes gets its own r1bio
	 * with a set of bios attached.
	 */

	disks = conf->raid_disks * 2;
 retry_write:
	r1_bio->start_next_window = start_next_window;
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r1_bio->sectors;
	for (i = 0;  i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		r1_bio->bios[i] = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags)
		    || test_bit(Unmerged, &rdev->flags)) {
			if (i < conf->raid_disks)
				set_bit(R1BIO_Degraded, &r1_bio->state);
			continue;
		}

		atomic_inc(&rdev->nr_pending);
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, r1_bio->sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* mustn't write here until the bad block is
				 * acknowledged*/
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= r1_bio->sector) {
				/* Cannot write here at all */
				bad_sectors -= (r1_bio->sector - first_bad);
				if (bad_sectors < max_sectors)
					/* mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				rdev_dec_pending(rdev, mddev);
				/* We don't set R1BIO_Degraded as that
				 * only applies if the disk is
				 * missing, so it might be re-added,
				 * and we want to know to recover this
				 * chunk.
				 * In this case the device is here,
				 * and the fact that this chunk is not
				 * in-sync is recorded in the bad
				 * block log
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - r1_bio->sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r1_bio->bios[i] = bio;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;
		sector_t old = start_next_window;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
		r1_bio->state = 0;
		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		start_next_window = wait_barrier(conf, bio);
		/*
		 * We must make sure the multi r1bios of bio have
		 * the same value of bi_phys_segments
		 */
		if (bio->bi_phys_segments && old &&
		    old != start_next_window)
			/* Wait for the former r1bio(s) to complete */
			wait_event(conf->wait_barrier,
				   bio->bi_phys_segments == 1);
		goto retry_write;
	}

	if (max_sectors < r1_bio->sectors) {
		/* We are splitting this write into multiple parts, so
		 * we need to prepare for allocating another r1_bio.
		 */
		r1_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;

	atomic_set(&r1_bio->remaining, 1);
	atomic_set(&r1_bio->behind_remaining, 0);

	first_clone = 1;
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);

		if (first_clone) {
			/* do behind I/O ?
			 * Not if there are too many, or cannot
			 * allocate memory, or a reader on WriteMostly
			 * is waiting for behind writes to flush */
			if (bitmap &&
			    (atomic_read(&bitmap->behind_writes)
			     < mddev->bitmap_info.max_write_behind) &&
			    !waitqueue_active(&bitmap->behind_wait))
				alloc_behind_pages(mbio, r1_bio);

			bitmap_startwrite(bitmap, r1_bio->sector,
					  r1_bio->sectors,
					  test_bit(R1BIO_BehindIO,
						   &r1_bio->state));
			first_clone = 0;
		}
		if (r1_bio->behind_bvecs) {
			struct bio_vec *bvec;
			int j;

			/*
			 * We trimmed the bio, so _all is legit
			 */
			bio_for_each_segment_all(bvec, mbio, j)
				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		r1_bio->bios[i] = mbio;

		mbio->bi_iter.bi_sector	= (r1_bio->sector +
				   conf->mirrors[i].rdev->data_offset);
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw =
			WRITE | do_flush_fua | do_sync | do_discard | do_same;
		mbio->bi_private = r1_bio;

		atomic_inc(&r1_bio->remaining);

		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
		if (cb)
			plug = container_of(cb, struct raid1_plug_cb, cb);
		else
			plug = NULL;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (plug) {
			bio_list_add(&plug->pending, mbio);
			plug->pending_cnt++;
		} else {
			bio_list_add(&conf->pending_bio_list, mbio);
			conf->pending_count++;
		}
		spin_unlock_irqrestore(&conf->device_lock, flags);
		if (!plug)
			md_wakeup_thread(mddev->thread);
	}
	/* Mustn't call r1_bio_write_done before this next test,
	 * as it could result in the bio being freed.
	 */
	if (sectors_handled < bio_sectors(bio)) {
		r1_bio_write_done(r1_bio);
		/* We need another r1_bio.  It has already been counted
		 * in bio->bi_phys_segments
		 */
		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
		r1_bio->master_bio = bio;
		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
		r1_bio->state = 0;
		r1_bio->mddev = mddev;
		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
		goto retry_write;
	}

	r1_bio_write_done(r1_bio);

	/* In case raid1d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r1conf *conf = mddev->private;
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}

static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r1conf *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1) {
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive.
		 * However don't try a recovery from this drive as
		 * it is very likely to fail.
		 */
		conf->recovery_disabled = mddev->recovery_disabled;
		return;
	}
	set_bit(Blocked, &rdev->flags);
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		set_bit(Faulty, &rdev->flags);
	/*
	 * if recovery is running, make sure it aborts.
	 */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
	       "md/raid1:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->raid_disks - mddev->degraded);
}

static void print_conf(struct r1conf *conf)
{
	int i;

	printk(KERN_DEBUG "RAID1 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(struct r1conf *conf)
{
	wait_barrier(conf, NULL);
	allow_barrier(conf, 0, 0);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;

	spin_lock_irq(&conf->resync_lock);
	conf->next_resync = 0;
	conf->start_next_window = MaxSector;
	conf->current_window_requests +=
		conf->next_window_requests;
	conf->next_window_requests = 0;
	spin_unlock_irq(&conf->resync_lock);
}

static int raid1_spare_active(struct mddev *mddev)
{
	int i;
	struct r1conf *conf = mddev->private;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all failed disks within the RAID1 configuration
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = conf->mirrors[i].rdev;
		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
		if (repl
		    && repl->recovery_offset == MaxSector
		    && !test_bit(Faulty, &repl->flags)
		    && !test_and_set_bit(In_sync, &repl->flags)) {
			/* replacement has just become active */
			if (!rdev ||
			    !test_and_clear_bit(In_sync, &rdev->flags))
				count++;
			if (rdev) {
				/* Replaced device not technically
				 * faulty, but we need to be sure
				 * it gets removed and never re-added
				 */
				set_bit(Faulty, &rdev->flags);
				sysfs_notify_dirent_safe(
					rdev->sysfs_state);
			}
		}
		if (rdev
		    && rdev->recovery_offset == MaxSector
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			count++;
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}

static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror = 0;
	struct raid1_info *p;
	int first = 0;
	int last = conf->raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);

	if (mddev->recovery_disabled == conf->recovery_disabled)
		return -EBUSY;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	for (mirror = first; mirror <= last; mirror++) {
		p = conf->mirrors+mirror;
		if (!p->rdev) {

			if (mddev->gendisk)
				disk_stack_limits(mddev->gendisk, rdev->bdev,
						  rdev->data_offset << 9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}
		if (test_bit(WantReplacement, &p->rdev->flags) &&
		    p[conf->raid_disks].rdev == NULL) {
			/* Add this device as a replacement */
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			conf->fullsync = 1;
			rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
			break;
		}
	}
	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
		/* Some requests might not have seen this new
		 * merge_bvec_fn.  We must wait for them to complete
		 * before merging the device fully.
		 * First we make sure any code which has tested
		 * our function has submitted the request, then
		 * we wait for all outstanding requests to complete.
		 */
		synchronize_sched();
		freeze_array(conf, 0);
		unfreeze_array(conf);
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	print_conf(conf);
	return err;
}

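/*
 * Hot-remove 'rdev'.  Returns -EBUSY while the device is In_sync or
 * still has IO pending; if a replacement is present it is slid into
 * the vacated slot while the array is frozen.
 */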
static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct raid1_info *p = conf->mirrors + number;

	if (rdev != p->rdev)
		p = conf->mirrors + conf->raid_disks + number;

	print_conf(conf);
	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove non-faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    mddev->recovery_disabled != conf->recovery_disabled &&
		    mddev->degraded < conf->raid_disks) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		} else if (conf->mirrors[conf->raid_disks + number].rdev) {
			/* We just removed a device that is being replaced.
			 * Move down the replacement.  We drain all IO before
			 * doing this to avoid confusion.
			 */
			struct md_rdev *repl =
				conf->mirrors[conf->raid_disks + number].rdev;
			freeze_array(conf, 0);
			clear_bit(Replacement, &repl->flags);
			p->rdev = repl;
			conf->mirrors[conf->raid_disks + number].rdev = NULL;
			unfreeze_array(conf);
			clear_bit(WantReplacement, &rdev->flags);
		} else
			clear_bit(WantReplacement, &rdev->flags);
		err = md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}

static void end_sync_read(struct bio *bio, int error)
{
	struct r1bio *r1_bio = bio->bi_private;

	update_head_pos(r1_bio->read_disk, r1_bio);

	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

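/*
 * Completion handler for resync/recovery writes.  On failure the
 * relevant bitmap bits are left dirty and the error is recorded so
 * that raid1d can later retry or mark bad blocks.
 */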
static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int mirror=0;
	sector_t first_bad;
	int bad_sectors;

	mirror = find_bio_disk(r1_bio, bio);

	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				mddev->recovery);
		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else if (is_badblock(conf->mirrors[mirror].rdev,
			       r1_bio->sector,
			       r1_bio->sectors,
			       &first_bad, &bad_sectors) &&
		   !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
				r1_bio->sector,
				r1_bio->sectors,
				&first_bad, &bad_sectors)
		)
		set_bit(R1BIO_MadeGood, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, uptodate);
		}
	}
}

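/*
 * Synchronously read or write 'sectors' through 'page'.  Returns 1 on
 * success; otherwise records a bad block (failing the whole device if
 * even that is impossible) and returns 0.
 */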
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
			    int sectors, struct page *page, int rw)
{
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

static int fix_sync_read_error(struct r1bio *r1_bio)
{
	/* Try some synchronous reads of other devices to get
	 * good data, much like with normal read errors.  Only
	 * read into the pages we already have so we don't
	 * need to re-issue the read request.
	 * We don't need to freeze the array, because being in an
	 * active sync request, there is no normal IO, and
	 * no overlapping syncs.
	 * We don't need to check is_badblock() again as we
	 * made sure that anything with a bad block in range
	 * will have bi_end_io clear.
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct bio *bio = r1_bio->bios[r1_bio->read_disk];
	sector_t sect = r1_bio->sector;
	int sectors = r1_bio->sectors;
	int idx = 0;

	while(sectors) {
		int s = sectors;
		int d = r1_bio->read_disk;
		int success = 0;
		struct md_rdev *rdev;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;
		do {
			if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
				/* No rcu protection needed here; devices
				 * can only be removed when no resync is
				 * active, and resync is currently active
				 */
				rdev = conf->mirrors[d].rdev;
				if (sync_page_io(rdev, sect, s<<9,
						 bio->bi_io_vec[idx].bv_page,
						 READ, false)) {
					success = 1;
					break;
				}
			}
			d++;
			if (d == conf->raid_disks * 2)
				d = 0;
		} while (!success && d != r1_bio->read_disk);

		if (!success) {
			char b[BDEVNAME_SIZE];
			int abort = 0;
			/* Cannot read from anywhere, this block is lost.
			 * Record a bad block on each device.  If that doesn't
			 * work just disable and interrupt the recovery.
			 * Don't fail devices as that won't really help.
			 */
			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
			       " for block %llu\n",
			       mdname(mddev),
			       bdevname(bio->bi_bdev, b),
			       (unsigned long long)r1_bio->sector);
			for (d = 0; d < conf->raid_disks * 2; d++) {
				rdev = conf->mirrors[d].rdev;
				if (!rdev || test_bit(Faulty, &rdev->flags))
					continue;
				if (!rdev_set_badblocks(rdev, sect, s, 0))
					abort = 1;
			}
			if (abort) {
				conf->recovery_disabled =
					mddev->recovery_disabled;
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return 0;
			}
			/* Try next page */
			sectors -= s;
			sect += s;
			idx++;
			continue;
		}

		start = d;
		/* write it back and re-read */
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    WRITE) == 0) {
				r1_bio->bios[d]->bi_end_io = NULL;
				rdev_dec_pending(rdev, mddev);
			}
		}
		d = start;
		while (d != r1_bio->read_disk) {
			if (d == 0)
				d = conf->raid_disks * 2;
			d--;
			if (r1_bio->bios[d]->bi_end_io != end_sync_read)
				continue;
			rdev = conf->mirrors[d].rdev;
			if (r1_sync_page_io(rdev, sect, s,
					    bio->bi_io_vec[idx].bv_page,
					    READ) != 0)
				atomic_add(s, &rdev->corrected_errors);
		}
		sectors -= s;
		sect += s;
		idx++;
	}
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	return 1;
}

static void process_checks(struct r1bio *r1_bio)
{
	/* We have read all readable devices.  If we haven't
	 * got the block, then there is no hope left.
	 * If we have, then we want to do a comparison
	 * and skip the write if everything is the same.
	 * If any blocks failed to read, then we need to
	 * attempt an over-write
	 */
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	int primary;
	int i;
	int vcnt;

	/* Fix variable parts of all bios */
	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		int size;
		int uptodate;
		struct bio *b = r1_bio->bios[i];
		if (b->bi_end_io != end_sync_read)
			continue;
		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
		bio_reset(b);
		if (!uptodate)
			clear_bit(BIO_UPTODATE, &b->bi_flags);
		b->bi_vcnt = vcnt;
		b->bi_iter.bi_size = r1_bio->sectors << 9;
		b->bi_iter.bi_sector = r1_bio->sector +
			conf->mirrors[i].rdev->data_offset;
		b->bi_bdev = conf->mirrors[i].rdev->bdev;
		b->bi_end_io = end_sync_read;
		b->bi_private = r1_bio;

		size = b->bi_iter.bi_size;
		for (j = 0; j < vcnt ; j++) {
			struct bio_vec *bi;
			bi = &b->bi_io_vec[j];
			bi->bv_offset = 0;
			if (size > PAGE_SIZE)
				bi->bv_len = PAGE_SIZE;
			else
				bi->bv_len = size;
			size -= PAGE_SIZE;
		}
	}
	for (primary = 0; primary < conf->raid_disks * 2; primary++)
		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
			r1_bio->bios[primary]->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
			break;
		}
	r1_bio->read_disk = primary;
	for (i = 0; i < conf->raid_disks * 2; i++) {
		int j;
		struct bio *pbio = r1_bio->bios[primary];
		struct bio *sbio = r1_bio->bios[i];
		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);

		if (sbio->bi_end_io != end_sync_read)
			continue;
		/* Now we can 'fixup' the BIO_UPTODATE flag */
		set_bit(BIO_UPTODATE, &sbio->bi_flags);

		if (uptodate) {
			for (j = vcnt; j-- ; ) {
				struct page *p, *s;
				p = pbio->bi_io_vec[j].bv_page;
				s = sbio->bi_io_vec[j].bv_page;
				if (memcmp(page_address(p),
					   page_address(s),
					   sbio->bi_io_vec[j].bv_len))
					break;
			}
		} else
			j = 0;
		if (j >= 0)
			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
			      && uptodate)) {
			/* No need to write to this device. */
			sbio->bi_end_io = NULL;
			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
			continue;
		}

		bio_copy_data(sbio, pbio);
	}
}

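/*
 * Finish one resync r1bio: repair any failed reads, compare the
 * copies on a user-requested check/repair pass, then issue whatever
 * writes remain outstanding.
 */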
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	struct r1conf *conf = mddev->private;
	int i;
	int disks = conf->raid_disks * 2;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		/* ouch - failed to read all of that. */
		if (!fix_sync_read_error(r1_bio))
			return;

	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		process_checks(r1_bio);

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		int s = r1_bio->sectors;
		if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
		    test_bit(R1BIO_WriteError, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			put_buf(r1_bio);
			md_done_sync(mddev, s, 1);
		}
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r1conf *conf, int read_disk,
			   sector_t sect, int sectors)
{
	struct mddev *mddev = conf->mddev;
	while(sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		struct md_rdev *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			sector_t first_bad;
			int bad_sectors;

			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    (test_bit(In_sync, &rdev->flags) ||
			     (!test_bit(Faulty, &rdev->flags) &&
			      rdev->recovery_offset >= sect + s)) &&
			    is_badblock(rdev, sect, s,
					&first_bad, &bad_sectors) == 0 &&
			    sync_page_io(rdev, sect, s<<9,
					 conf->tmppage, READ, false))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks * 2)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere - mark it bad */
			struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
			if (!rdev_set_badblocks(rdev, sect, s, 0))
				md_error(mddev, rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags))
				r1_sync_page_io(rdev, sect, s,
						conf->tmppage, WRITE);
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks * 2;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (r1_sync_page_io(rdev, sect, s,
						    conf->tmppage, READ)) {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "md/raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
					           rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}

static int narrow_write_error(struct r1bio *r1_bio, int i)
{
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[i].rdev;

	/* bio has the data to be written to device 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this somehow.
	 *
	 * We currently own a reference on the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r1_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = roundup(1 << rdev->badblocks.shift,
				bdev_logical_block_size(rdev->bdev) >> 9);
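	/*
	 * Align writes to block_sectors boundaries.  For example, with
	 * block_sectors == 8 and r1_bio->sector == 21, the first pass
	 * covers ((21 + 8) & ~7) - 21 = 3 sectors (21..23); every
	 * later pass writes whole blocks.
	 */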
	sector = r1_bio->sector;
	sectors = ((sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors' */

		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
			unsigned vcnt = r1_bio->behind_page_count;
			struct bio_vec *vec = r1_bio->behind_bvecs;

			while (!vec->bv_page) {
				vec++;
				vcnt--;
			}

			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));

			wbio->bi_vcnt = vcnt;
		} else {
			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		}

		wbio->bi_rw = WRITE;
		wbio->bi_iter.bi_sector = r1_bio->sector;
		wbio->bi_iter.bi_size = r1_bio->sectors << 9;

		bio_trim(wbio, sector - r1_bio->sector, sectors);
		wbio->bi_iter.bi_sector += rdev->data_offset;
		wbio->bi_bdev = rdev->bdev;
		if (submit_bio_wait(WRITE, wbio) == 0)
			/* failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

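/*
 * Called from raid1d once all writes of a sync r1bio have finished:
 * clear bad-block ranges that were rewritten successfully, record new
 * bad blocks, then complete the sync chunk.
 */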
static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	int s = r1_bio->sectors;
	for (m = 0; m < conf->raid_disks * 2 ; m++) {
		struct md_rdev *rdev = conf->mirrors[m].rdev;
		struct bio *bio = r1_bio->bios[m];
		if (bio->bi_end_io == NULL)
			continue;
		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
		}
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
				md_error(conf->mddev, rdev);
		}
	}
	put_buf(r1_bio);
	md_done_sync(conf->mddev, s, 1);
}

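/*
 * Called from raid1d for a normal write that saw an error: clear bad
 * blocks proven good (IO_MADE_GOOD), narrow down real write errors to
 * precise bad-block records, then complete the original bio.
 */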
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
	int m;
	for (m = 0; m < conf->raid_disks * 2 ; m++)
		if (r1_bio->bios[m] == IO_MADE_GOOD) {
			struct md_rdev *rdev = conf->mirrors[m].rdev;
			rdev_clear_badblocks(rdev,
					     r1_bio->sector,
					     r1_bio->sectors, 0);
			rdev_dec_pending(rdev, conf->mddev);
		} else if (r1_bio->bios[m] != NULL) {
			/* This drive got a write error.  We need to
			 * narrow down and record precise write
			 * errors.
			 */
			if (!narrow_write_error(r1_bio, m)) {
				md_error(conf->mddev,
					 conf->mirrors[m].rdev);
				/* an I/O failed, we can't clear the bitmap */
				set_bit(R1BIO_Degraded, &r1_bio->state);
			}
			rdev_dec_pending(conf->mirrors[m].rdev,
					 conf->mddev);
		}
	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		close_write(r1_bio);
	raid_end_bio_io(r1_bio);
}

static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
{
	int disk;
	int max_sectors;
	struct mddev *mddev = conf->mddev;
	struct bio *bio;
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	clear_bit(R1BIO_ReadError, &r1_bio->state);
	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check whether that fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen
	 */
	if (mddev->ro == 0) {
		freeze_array(conf, 1);
		fix_read_error(conf, r1_bio->read_disk,
			       r1_bio->sector, r1_bio->sectors);
		unfreeze_array(conf);
	} else
		md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);

	bio = r1_bio->bios[r1_bio->read_disk];
	bdevname(bio->bi_bdev, b);
read_more:
	disk = read_balance(conf, r1_bio, &max_sectors);
	if (disk == -1) {
		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
		raid_end_bio_io(r1_bio);
	} else {
		const unsigned long do_sync
			= r1_bio->master_bio->bi_rw & REQ_SYNC;
		if (bio) {
			r1_bio->bios[r1_bio->read_disk] =
				mddev->ro ? IO_BLOCKED : NULL;
			bio_put(bio);
		}
		r1_bio->read_disk = disk;
		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);
		r1_bio->bios[r1_bio->read_disk] = bio;
		rdev = conf->mirrors[disk].rdev;
		printk_ratelimited(KERN_ERR
				   "md/raid1:%s: redirecting sector %llu"
				   " to other mirror: %s\n",
				   mdname(mddev),
				   (unsigned long long)r1_bio->sector,
				   bdevname(rdev->bdev, b));
		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_end_io = raid1_end_read_request;
		bio->bi_rw = READ | do_sync;
		bio->bi_private = r1_bio;
		if (max_sectors < r1_bio->sectors) {
			/* Drat - have to split this up more */
			struct bio *mbio = r1_bio->master_bio;
			int sectors_handled = (r1_bio->sector + max_sectors
					       - mbio->bi_iter.bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (mbio->bi_phys_segments == 0)
				mbio->bi_phys_segments = 2;
			else
				mbio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			generic_make_request(bio);
			bio = NULL;

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = mbio;
			r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
			r1_bio->state = 0;
			set_bit(R1BIO_ReadError, &r1_bio->state);
			r1_bio->mddev = mddev;
			r1_bio->sector = mbio->bi_iter.bi_sector +
				sectors_handled;

			goto read_more;
		} else
			generic_make_request(bio);
	}
}

static void raid1d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r1bio *r1_bio;
	unsigned long flags;
	struct r1conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, struct r1bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from separate
			 * context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

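/*
 * Allocate the resync buffer pool, sized to cover one resync window;
 * invoked lazily from the first sync_request() of a pass.
 */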
static int init_resync(struct r1conf *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O requests - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);

	raise_barrier(conf, sector_nr);

	rcu_read_lock();
	/*
	 * If we get a correctably read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev;
		bio = r1_bio->bios[i];
		bio_reset(bio);

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			if (i < conf->raid_disks)
				still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets ++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_rw = READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
				test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
				!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
				/*
				 * The device is suitable for reading (InSync),
				 * but has bad block(s) here. Let's try to correct them,
				 * if we are doing resync or repair. Otherwise, leave
				 * this device alone for this sync request.
				 */
				bio->bi_rw = WRITE;
				bio->bi_end_io = end_sync_write;
				write_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			bio->bi_private = r1_bio;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;

	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0 ; i < conf->raid_disks * 2 ; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				struct md_rdev *rdev = conf->mirrors[i].rdev;
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;

	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv;
		if (min_bad > 0)
			max_sector = sector_nr + min_bad;
		rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0 ; i < conf->raid_disks * 2; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io==NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_iter.bi_size -= len;
						__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				read_targets--;
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);

	}
	return nr_sectors;
}

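/*
 * With mirroring the array size is just the per-device size; a
 * 'sectors' argument of 0 means "use the current dev_sectors".
 */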
static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}

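/*
 * Allocate and populate an r1conf: the mirrors array (twice
 * raid_disks, leaving room for replacements), the r1bio mempool and
 * the raid1d thread.  Returns an ERR_PTR on failure.
 */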
static struct r1conf *setup_conf(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct raid1_info *disk;
	struct md_rdev *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct raid1_info)
				* mddev->raid_disks * 2,
				 GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks * 2;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->poolinfo->mddev = mddev;

	err = -EINVAL;
	spin_lock_init(&conf->device_lock);
	rdev_for_each(rdev, mddev) {
		struct request_queue *q;
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		if (test_bit(Replacement, &rdev->flags))
			disk = conf->mirrors + mddev->raid_disks + disk_idx;
		else
			disk = conf->mirrors + disk_idx;

		if (disk->rdev)
			goto abort;
		disk->rdev = rdev;
		q = bdev_get_queue(rdev->bdev);
		if (q->merge_bvec_fn)
			mddev->merge_check_needed = 1;

		disk->head_position = 0;
		disk->seq_start = MaxSector;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	conf->pending_count = 0;
	conf->recovery_disabled = mddev->recovery_disabled - 1;

	conf->start_next_window = MaxSector;
	conf->current_window_requests = conf->next_window_requests = 0;

	err = -EIO;
	for (i = 0; i < conf->raid_disks * 2; i++) {

		disk = conf->mirrors + i;

		if (i < conf->raid_disks &&
		    disk[conf->raid_disks].rdev) {
			/* This slot has a replacement. */
			if (!disk->rdev) {
				/* No original, just make the replacement
				 * a recovering spare
				 */
				disk->rdev =
					disk[conf->raid_disks].rdev;
				disk[conf->raid_disks].rdev = NULL;
			} else if (!test_bit(In_sync, &disk->rdev->flags))
				/* Original is not in_sync - bad */
				goto abort;
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev &&
			    (disk->rdev->saved_raid_disk < 0))
				conf->fullsync = 1;
		}
	}

	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, "raid1");
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid1:%s: couldn't allocate thread\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
	}
	return ERR_PTR(err);
}

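/*
 * Personality entry point: validate level and reshape state, adopt or
 * build the r1conf, compute the initial degraded count and register
 * with the block-integrity framework.
 */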
static void raid1_free(struct mddev *mddev, void *priv);
static int run(struct mddev *mddev)
{
	struct r1conf *conf;
	int i;
	struct md_rdev *rdev;
	int ret;
	bool discard_supported = false;

	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
		       mdname(mddev));
		return -EIO;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in raid1_free()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	if (mddev->queue)
		blk_queue_max_write_same_sectors(mddev->queue, 0);

	rdev_for_each(rdev, mddev) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
			discard_supported = true;
	}

	mddev->degraded = 0;
	for (i=0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid1:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"md/raid1:%s: active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		if (discard_supported)
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
						mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
						  mddev->queue);
	}

	ret =  md_integrity_register(mddev);
	if (ret) {
		md_unregister_thread(&mddev->thread);
		raid1_free(mddev, conf);
	}
	return ret;
}

static void raid1_free(struct mddev *mddev, void *priv)
{
	struct r1conf *conf = priv;

	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	safe_put_page(conf->tmppage);
	kfree(conf->poolinfo);
	kfree(conf);
}

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	sector_t newsize = raid1_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(struct mddev *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	struct raid1_info *newmirrors;
	struct r1conf *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt=0;
		for (d= 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks * 2;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
			     GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	freeze_array(conf, 0);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		struct md_rdev *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING
				       "md/raid1:%s: cannot register rd%d\n",
				       mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	unfreeze_array(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

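/*
 * Quiesce callback: state 1 freezes the array, state 0 thaws it, and
 * state 2 only wakes barrier waiters so a suspend can make progress.
 */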
static void raid1_quiesce(struct mddev *mddev, int state)
{
	struct r1conf *conf = mddev->private;

	switch(state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1:
		freeze_array(conf, 0);
		break;
	case 0:
		unfreeze_array(conf);
		break;
	}
}

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf))
			/* Array must appear to be quiesced */
			conf->array_frozen = 1;
		return conf;
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid1_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
	.congested	= raid1_congested,
	.mergeable_bvec	= raid1_mergeable_bvec,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);