/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) of each chunk, and each is on a different
 * drive.
 * near_copies and far_copies must be at least one, and their product is at most
 * raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 */
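
/*
 * Worked example (illustrative, not part of the original comment):
 * with raid_disks=4, near_copies=2, far_copies=1 and chunks A,B,C,...
 * the layout described above is:
 *
 *	dev0	dev1	dev2	dev3
 *	 A	 A	 B	 B
 *	 C	 C	 D	 D
 *
 * With near_copies=1, far_copies=2 on two drives, each drive is split
 * into two sections and the second section holds the far copies,
 * shifted by near_copies devices:
 *
 *	dev0	dev1
 *	 A	 B
 *	 C	 D
 *	 B	 A	(far copies)
 *	 D	 C
 */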

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define	NR_RAID10_BIOS 256

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int enough(struct r10conf *conf, int ignore);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
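/*
 * Note added for clarity: with RESYNC_BLOCK_SIZE of 64K as above,
 * RESYNC_DEPTH works out to 32MB / 64KB = 512 in-flight resync
 * requests at most.
 */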

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct page *page;
	struct r10bio *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0 ; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
						&conf->mddev->recovery)) {
				/* we can share bv_page's during recovery */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				get_page(page);
			} else
				page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			if (rbio)
				rbio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while (++j < nalloc) {
		bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;

	for (j=0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = & r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	int done;
	struct r10conf *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;


	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.  Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (!enough(conf, rdev->raid_disk))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			r10_bio->sectors,
			!test_bit(R10BIO_Degraded, &r10_bio->state),
			0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen,	&rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
			dec_rdev = 0;
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors)) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the one device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
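
/*
 * Worked example (illustrative, not from the original): with
 * raid_disks=4, near_copies=2, far_copies=1 and 64K chunks
 * (128 sectors, so chunk_shift=7 and chunk_mask=127), virtual
 * sector 300 is mapped by __raid10_find_phys() as:
 *
 *	chunk  = 300 >> 7       = 2	(virtual chunk number)
 *	offset = 300 & 127      = 44	(offset within the chunk)
 *	chunk *= near_copies    = 4
 *	dev    = 4 % raid_disks = 0,	stripe = 4 / raid_disks = 1
 *	sector = 44 + (1 << 7)  = 172
 *
 * so the two copies live at device sector 172 of dev0 and dev1.
 */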

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n,f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		sector_t s = sector;
		r10bio->devs[slot].addr = sector;
		r10bio->devs[slot].devnum = d;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			d += geo->near_copies;
			if (d >= geo->raid_disks)
				d -= geo->raid_disks;
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < 0)
			dev += geo->raid_disks;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < geo->near_copies)
				dev += geo->raid_disks - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}

/**
 *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 *	This requires checking for end-of-chunk if near_copies != raid_disks,
 *	and for subordinate merge_bvec_fns if merge_check_needed.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r10conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards))
		geo = &conf->prev;

	if (geo->near_copies < geo->raid_disks) {
		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
					+ bio_sectors)) << 9;
		if (max < 0)
			/* bio_add cannot handle a negative return */
			max = 0;
		if (max <= biovec->bv_len && bio_sectors == 0)
			return biovec->bv_len;
	} else
		max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		struct r10bio r10_bio;
		int s;
		if (conf->reshape_progress != MaxSector) {
			/* Cannot give any guidance during reshape */
			if (max <= biovec->bv_len && bio_sectors == 0)
				return biovec->bv_len;
			return 0;
		}
		r10_bio.sector = sector;
		raid10_find_phys(conf, &r10_bio);
		rcu_read_lock();
		for (s = 0; s < conf->copies; s++) {
			int disk = r10_bio.devs[s].devnum;
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio.devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
			rdev = rcu_dereference(conf->mirrors[disk].replacement);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio.devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
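
/*
 * Illustrative example: in a 'near' layout, if the last IO on a mirror
 * completed at sector 1000 and the new read starts at sector 1008, that
 * mirror's distance is 8 and it will normally beat a mirror whose head
 * sits thousands of sectors away.  For far layouts the balancing below
 * simply prefers the copy with the lowest address.
 */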
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *rdev, *best_rdev;
	int do_balance;
	int best_slot;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	sectors = r10_bio->sectors;
	best_slot = -1;
	best_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_slot = slot;
					best_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
			best_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		slot = best_slot;
		rdev = best_rdev;
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}

static int raid10_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	struct r10conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	if (mddev_congested(mddev, bits))
		return 1;
	rcu_read_lock();
	for (i = 0;
	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
		     && ret == 0;
	     i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
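
/*
 * Usage sketch (added for illustration): a background resync and a
 * regular write pair the calls like this:
 *
 *	resync thread			regular IO
 *	-------------			----------
 *	raise_barrier(conf, 0);		wait_barrier(conf);
 *	... submit sync requests ...	... submit the bio ...
 *	lower_barrier(conf);		allow_barrier(conf);
 */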

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock, );

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock, );

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock,
			);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(struct r10conf *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
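
/*
 * Sketch of the intended pairing (illustrative): an error handler
 * quiesces the array, repairs state, then releases it:
 *
 *	freeze_array(conf);
 *	... handle the failed request with all IO blocked ...
 *	unfreeze_array(conf);
 */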

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

static void make_request(struct mddev *mddev, struct bio * bio)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	struct bio *read_bio;
	int i;
	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
	int chunk_sects = chunk_mask + 1;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	int plugged;
	int sectors_handled;
	int max_sectors;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	/* If this request crosses a chunk boundary, we need to
	 * split it.  This will only happen for 1 PAGE (or less) requests.
	 */
	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
		     > chunk_sects
		     && (conf->geo.near_copies < conf->geo.raid_disks
			 || conf->prev.near_copies < conf->prev.raid_disks))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio,
			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );

		/* Each of these 'make_request' calls will call 'wait_barrier'.
		 * If the first succeeds but the second blocks due to the resync
		 * thread raising the barrier, we will deadlock because the
		 * IO to the underlying device will be queued in generic_make_request
		 * and will never complete, so will never reduce nr_pending.
		 * So increment nr_waiting here so no new raise_barriers will
		 * succeed, and so the second wait_barrier cannot block.
		 */
		spin_lock_irq(&conf->resync_lock);
		conf->nr_waiting++;
		spin_unlock_irq(&conf->resync_lock);

		make_request(mddev, &bp->bio1);
		make_request(mddev, &bp->bio2);

		spin_lock_irq(&conf->resync_lock);
		conf->nr_waiting--;
		wake_up(&conf->wait_barrier);
		spin_unlock_irq(&conf->resync_lock);

		bio_pair_release(bp);
		return;
	bad_map:
		printk("md/raid10:%s: make_request bug: can't convert block across chunks"
		       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

		bio_io_error(bio);
		return;
	}

	md_write_start(mddev, bio);

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf);

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio->bi_size >> 9;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_sector;
	r10_bio->state = 0;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r10_bio and no locking
	 * will be needed when the request completes.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		struct md_rdev *rdev;
		int slot;

read_again:
		rdev = read_balance(conf, r10_bio, &max_sectors);
		if (!rdev) {
			raid_end_bio_io(r10_bio);
			return;
		}
		slot = r10_bio->read_slot;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
			    max_sectors);

		r10_bio->devs[slot].bio = read_bio;
		r10_bio->devs[slot].rdev = rdev;

		read_bio->bi_sector = r10_bio->devs[slot].addr +
			choose_data_offset(r10_bio, rdev);
		read_bio->bi_bdev = rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r10_bio;

		if (max_sectors < r10_bio->sectors) {
			/* Could not read all from this device, so we will
			 * need another r10_bio.
			 */
			sectors_handled = (r10_bio->sectors + max_sectors
					   - bio->bi_sector);
			r10_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __generic_make_request
			 * and subsequent mempool_alloc might block
			 * waiting for it.  so hand bio over to raid10d.
			 */
			reschedule_retry(r10_bio);

			r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

			r10_bio->master_bio = bio;
			r10_bio->sectors = ((bio->bi_size >> 9)
					    - sectors_handled);
			r10_bio->state = 0;
			r10_bio->mddev = mddev;
			r10_bio->sector = bio->bi_sector + sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}

	/*
	 * WRITE:
	 */
	if (conf->pending_count >= max_queued_requests) {
		md_wakeup_thread(mddev->thread);
		wait_event(conf->wait_barrier,
			   conf->pending_count < max_queued_requests);
	}
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 * If there are known/acknowledged bad blocks on any device
	 * on which we have seen a write error, we want to avoid
	 * writing to those blocks.  This potentially requires several
	 * writes to write around the bad blocks.  Each set of writes
	 * gets its own r10_bio with a set of bios attached.  The number
	 * of r10_bios is recorded in bio->bi_phys_segments just as with
	 * the read case.
	 */
	plugged = mddev_check_plugged(mddev);

	r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
	raid10_find_phys(conf, r10_bio);
retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	max_sectors = r10_bio->sectors;

	for (i = 0;  i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
		struct md_rdev *rrdev = rcu_dereference(
			conf->mirrors[d].replacement);
		if (rdev == rrdev)
			rrdev = NULL;
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
			atomic_inc(&rrdev->nr_pending);
			blocked_rdev = rrdev;
			break;
		}
		if (rrdev && (test_bit(Faulty, &rrdev->flags)
			      || test_bit(Unmerged, &rrdev->flags)))
			rrdev = NULL;

		r10_bio->devs[i].bio = NULL;
		r10_bio->devs[i].repl_bio = NULL;
		if (!rdev || test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags)) {
			set_bit(R10BIO_Degraded, &r10_bio->state);
			continue;
		}
		if (test_bit(WriteErrorSeen, &rdev->flags)) {
			sector_t first_bad;
			sector_t dev_sector = r10_bio->devs[i].addr;
			int bad_sectors;
			int is_bad;

			is_bad = is_badblock(rdev, dev_sector,
					     max_sectors,
					     &first_bad, &bad_sectors);
			if (is_bad < 0) {
				/* Mustn't write here until the bad block
				 * is acknowledged
				 */
				atomic_inc(&rdev->nr_pending);
				set_bit(BlockedBadBlocks, &rdev->flags);
				blocked_rdev = rdev;
				break;
			}
			if (is_bad && first_bad <= dev_sector) {
				/* Cannot write here at all */
				bad_sectors -= (dev_sector - first_bad);
				if (bad_sectors < max_sectors)
					/* Mustn't write more than bad_sectors
					 * to other devices yet
					 */
					max_sectors = bad_sectors;
				/* We don't set R10BIO_Degraded as that
				 * only applies if the disk is missing,
				 * so it might be re-added, and we want to
				 * know to recover this chunk.
				 * In this case the device is here, and the
				 * fact that this chunk is not in-sync is
				 * recorded in the bad block log.
				 */
				continue;
			}
			if (is_bad) {
				int good_sectors = first_bad - dev_sector;
				if (good_sectors < max_sectors)
					max_sectors = good_sectors;
			}
		}
		r10_bio->devs[i].bio = bio;
		atomic_inc(&rdev->nr_pending);
		if (rrdev) {
			r10_bio->devs[i].repl_bio = bio;
			atomic_inc(&rrdev->nr_pending);
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		int j;
		int d;

		for (j = 0; j < i; j++) {
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
			if (r10_bio->devs[j].repl_bio) {
				struct md_rdev *rdev;
				d = r10_bio->devs[j].devnum;
				rdev = conf->mirrors[d].replacement;
				if (!rdev) {
					/* Race with remove_disk */
					smp_mb();
					rdev = conf->mirrors[d].rdev;
				}
				rdev_dec_pending(rdev, mddev);
			}
		}
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	if (max_sectors < r10_bio->sectors) {
		/* We are splitting this into multiple parts, so
		 * we need to prepare for allocating another r10_bio.
		 */
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;
		else
			bio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
	}
	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;

	atomic_set(&r10_bio->remaining, 1);
	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);

	for (i = 0; i < conf->copies; i++) {
		struct bio *mbio;
		int d = r10_bio->devs[i].devnum;
		if (!r10_bio->devs[i].bio)
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
			    max_sectors);
		r10_bio->devs[i].bio = mbio;

		mbio->bi_sector	= (r10_bio->devs[i].addr+
				   choose_data_offset(r10_bio,
						      conf->mirrors[d].rdev));
		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
		mbio->bi_rw = WRITE | do_sync | do_fua;
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		conf->pending_count++;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		if (!r10_bio->devs[i].repl_bio)
			continue;

		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
			    max_sectors);
		r10_bio->devs[i].repl_bio = mbio;

		/* We are actively writing to the original device
		 * so it cannot disappear, so the replacement cannot
		 * become NULL here
		 */
		mbio->bi_sector	= (r10_bio->devs[i].addr +
				   choose_data_offset(
					   r10_bio,
					   conf->mirrors[d].replacement));
		mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
		mbio->bi_rw = WRITE | do_sync | do_fua;
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		conf->pending_count++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	/* Don't remove the bias on 'remaining' (one_write_done) until
	 * after checking if we need to go around again.
	 */

	if (sectors_handled < (bio->bi_size >> 9)) {
		one_write_done(r10_bio);
		/* We need another r10_bio.  It has already been counted
		 * in bio->bi_phys_segments.
		 */
		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

		r10_bio->master_bio = bio;
		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;

		r10_bio->mddev = mddev;
		r10_bio->sector = bio->bi_sector + sectors_handled;
		r10_bio->state = 0;
		goto retry_write;
	}
	one_write_done(r10_bio);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync || !mddev->bitmap || !plugged)
		md_wakeup_thread(mddev->thread);
}

static void status(struct seq_file *seq, struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;
	int i;

	if (conf->geo.near_copies < conf->geo.raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->geo.near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->geo.near_copies);
	if (conf->geo.far_copies > 1) {
		if (conf->geo.far_offset)
			seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->geo.far_copies);
	}
	seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
					conf->geo.raid_disks - mddev->degraded);
	for (i = 0; i < conf->geo.raid_disks; i++)
		seq_printf(seq, "%s",
			      conf->mirrors[i].rdev &&
			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}

/* check if there are enough drives for
 * every block to appear on at least one.
 * Don't consider the device numbered 'ignore'
 * as we might be about to remove it.
 */
static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
{
	int first = 0;

	do {
		int n = conf->copies;
		int cnt = 0;
		while (n--) {
			if (conf->mirrors[first].rdev &&
			    first != ignore)
				cnt++;
			first = (first+1) % geo->raid_disks;
		}
		if (cnt == 0)
			return 0;
	} while (first != 0);
	return 1;
}

static int enough(struct r10conf *conf, int ignore)
{
	return _enough(conf, &conf->geo, ignore) &&
		_enough(conf, &conf->prev, ignore);
}

static void error(struct mddev *mddev, struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	struct r10conf *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disk, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && !enough(conf, rdev->raid_disk))
		/*
		 * Don't fail the drive, just return an IO error.
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	}
	set_bit(Blocked, &rdev->flags);
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT
	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
	       "md/raid10:%s: Operation continuing on %d devices.\n",
	       mdname(mddev), bdevname(rdev->bdev, b),
	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
}

static void print_conf(struct r10conf *conf)
{
	int i;
	struct mirror_info *tmp;

	printk(KERN_DEBUG "RAID10 conf printout:\n");
	if (!conf) {
		printk(KERN_DEBUG "(!conf)\n");
		return;
	}
	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
		conf->geo.raid_disks);

	for (i = 0; i < conf->geo.raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
				i, !test_bit(In_sync, &tmp->rdev->flags),
			        !test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}

static void close_sync(struct r10conf *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;
}

static int raid10_spare_active(struct mddev *mddev)
{
	int i;
	struct r10conf *conf = mddev->private;
	struct mirror_info *tmp;
	int count = 0;
	unsigned long flags;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->geo.raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->replacement
		    && tmp->replacement->recovery_offset == MaxSector
		    && !test_bit(Faulty, &tmp->replacement->flags)
		    && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
			/* Replacement has just become active */
			if (!tmp->rdev
			    || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
				count++;
			if (tmp->rdev) {
				/* Replaced device not technically faulty,
				 * but we need to be sure it gets removed
				 * and never re-added.
				 */
				set_bit(Faulty, &tmp->rdev->flags);
				sysfs_notify_dirent_safe(
					tmp->rdev->sysfs_state);
			}
			sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
		} else if (tmp->rdev
			   && !test_bit(Faulty, &tmp->rdev->flags)
			   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			count++;
			sysfs_notify_dirent(tmp->rdev->sysfs_state);
		}
	}
	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded -= count;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	print_conf(conf);
	return count;
}


static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r10conf *conf = mddev->private;
	int err = -EEXIST;
	int mirror;
	int first = 0;
	int last = conf->geo.raid_disks - 1;
	struct request_queue *q = bdev_get_queue(rdev->bdev);

	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync
		 */
		return -EBUSY;
	if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (q->merge_bvec_fn) {
		set_bit(Unmerged, &rdev->flags);
		mddev->merge_check_needed = 1;
	}

	if (rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;
	else
		mirror = first;
	for ( ; mirror <= last ; mirror++) {
		struct mirror_info *p = &conf->mirrors[mirror];
		if (p->recovery_disabled == mddev->recovery_disabled)
			continue;
		if (p->rdev) {
			if (!test_bit(WantReplacement, &p->rdev->flags) ||
			    p->replacement != NULL)
				continue;
			clear_bit(In_sync, &rdev->flags);
			set_bit(Replacement, &rdev->flags);
			rdev->raid_disk = mirror;
			err = 0;
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			conf->fullsync = 1;
			rcu_assign_pointer(p->replacement, rdev);
			break;
		}

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		p->head_position = 0;
		p->recovery_disabled = mddev->recovery_disabled - 1;
		rdev->raid_disk = mirror;
		err = 0;
		if (rdev->saved_raid_disk != mirror)
			conf->fullsync = 1;
		rcu_assign_pointer(p->rdev, rdev);
		break;
	}
	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
		/* Some requests might not have seen this new
		 * merge_bvec_fn.  We must wait for them to complete
		 * before merging the device fully.
		 * First we make sure any code which has tested
		 * our function has submitted the request, then
		 * we wait for all outstanding requests to complete.
		 */
		synchronize_sched();
		raise_barrier(conf, 0);
		lower_barrier(conf);
		clear_bit(Unmerged, &rdev->flags);
	}
	md_integrity_add_rdev(rdev, mddev);
	print_conf(conf);
	return err;
}

static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r10conf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct md_rdev **rdevp;
	struct mirror_info *p = conf->mirrors + number;

	print_conf(conf);
	if (rdev == p->rdev)
		rdevp = &p->rdev;
	else if (rdev == p->replacement)
		rdevp = &p->replacement;
	else
		return 0;

	if (test_bit(In_sync, &rdev->flags) ||
	    atomic_read(&rdev->nr_pending)) {
		err = -EBUSY;
		goto abort;
	}
	/* Only remove faulty devices if recovery
	 * is not possible.
	 */
	if (!test_bit(Faulty, &rdev->flags) &&
	    mddev->recovery_disabled != p->recovery_disabled &&
	    (!p->replacement || p->replacement == rdev) &&
	    enough(conf, -1)) {
		err = -EBUSY;
		goto abort;
	}
	*rdevp = NULL;
	synchronize_rcu();
	if (atomic_read(&rdev->nr_pending)) {
		/* lost the race, try later */
		err = -EBUSY;
		*rdevp = rdev;
		goto abort;
	} else if (p->replacement) {
		/* We must have just cleared 'rdev' */
		p->rdev = p->replacement;
		clear_bit(Replacement, &p->replacement->flags);
		smp_mb(); /* Make sure other CPUs may see both as identical
			   * but will never see neither -- if they are careful.
			   */
		p->replacement = NULL;
		clear_bit(WantReplacement, &rdev->flags);
	} else
		/* We might have just removed the Replacement as faulty.
		 * Clear the flag just in case.
		 */
		clear_bit(WantReplacement, &rdev->flags);

	err = md_integrity_register(mddev);

abort:

	print_conf(conf);
	return err;
}


static void end_sync_read(struct bio *bio, int error)
{
	struct r10bio *r10_bio = bio->bi_private;
	struct r10conf *conf = r10_bio->mddev->private;
	int d;

	d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	else
		/* The write handler will notice the lack of
		 * R10BIO_Uptodate and record any errors etc
		 */
		atomic_add(r10_bio->sectors,
			   &conf->mirrors[d].rdev->corrected_errors);

	/* for reconstruct, we always reschedule after a read.
	 * for resync, only after all reads
	 */
	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
	    atomic_dec_and_test(&r10_bio->remaining)) {
		/* we have read all the blocks,
		 * do the comparison in process context in raid10d
		 */
		reschedule_retry(r10_bio);
	}
}

static void end_sync_request(struct r10bio *r10_bio)
{
	struct mddev *mddev = r10_bio->mddev;

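	/* Walk back up the chain of r10_bios linked through the
	 * borrowed master_bio pointer (see sync_request()), finishing
	 * each one whose 'remaining' count reaches zero.
	 */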
	while (atomic_dec_and_test(&r10_bio->remaining)) {
		if (r10_bio->master_bio == NULL) {
			/* the primary of several recovery bios */
			sector_t s = r10_bio->sectors;
			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
			    test_bit(R10BIO_WriteError, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				put_buf(r10_bio);
			md_done_sync(mddev, s, 1);
			break;
		} else {
			struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
			if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
			    test_bit(R10BIO_WriteError, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				put_buf(r10_bio);
			r10_bio = r10_bio2;
		}
	}
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	int d;
	sector_t first_bad;
	int bad_sectors;
	int slot;
	int repl;
	struct md_rdev *rdev = NULL;

	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	if (repl)
		rdev = conf->mirrors[d].replacement;
	else
		rdev = conf->mirrors[d].rdev;

	if (!uptodate) {
		if (repl)
			md_error(mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
		}
	} else if (is_badblock(rdev,
			     r10_bio->devs[slot].addr,
			     r10_bio->sectors,
			     &first_bad, &bad_sectors))
		set_bit(R10BIO_MadeGood, &r10_bio->state);

	rdev_dec_pending(rdev, mddev);

	end_sync_request(r10_bio);
}

/*
 * Note: sync and recovery are handled very differently for raid10.
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However requests come in for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 */
/*
 * We check if all blocks are in-sync and only write to blocks that
 * aren't in sync
 */
static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	int i, first;
	struct bio *tbio, *fbio;
	int vcnt;

	atomic_set(&r10_bio->remaining, 1);

	/* find the first device with a block */
	for (i=0; i<conf->copies; i++)
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
			break;

	if (i == conf->copies)
		goto done;

	first = i;
	fbio = r10_bio->devs[i].bio;

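	/* One bi_io_vec entry per page is needed to cover ->sectors,
	 * rounded up: e.g. 96 sectors with 4KiB pages gives
	 * (96 + 7) >> 3 == 12 pages (illustrative numbers only).
	 */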
	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
	/* now find blocks with errors */
	for (i=0 ; i < conf->copies ; i++) {
		int  j, d;

		tbio = r10_bio->devs[i].bio;

		if (tbio->bi_end_io != end_sync_read)
			continue;
		if (i == first)
			continue;
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE;
			 */
			for (j = 0; j < vcnt; j++)
				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
					   page_address(tbio->bi_io_vec[j].bv_page),
					   fbio->bi_io_vec[j].bv_len))
					break;
			if (j == vcnt)
				continue;
			mddev->resync_mismatches += r10_bio->sectors;
			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				/* Don't fix anything. */
				continue;
		}
		/* Ok, we need to write this bio, either to correct an
		 * inconsistency or to correct an unreadable block.
		 * First we need to fixup bv_offset, bv_len and
		 * bi_vecs, as the read request might have corrupted these
		 */
		tbio->bi_vcnt = vcnt;
		tbio->bi_size = r10_bio->sectors << 9;
		tbio->bi_idx = 0;
		tbio->bi_phys_segments = 0;
		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		tbio->bi_flags |= 1 << BIO_UPTODATE;
		tbio->bi_next = NULL;
		tbio->bi_rw = WRITE;
		tbio->bi_private = r10_bio;
		tbio->bi_sector = r10_bio->devs[i].addr;

		for (j=0; j < vcnt ; j++) {
			tbio->bi_io_vec[j].bv_offset = 0;
			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;

			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
			       page_address(fbio->bi_io_vec[j].bv_page),
			       PAGE_SIZE);
		}
		tbio->bi_end_io = end_sync_write;

		d = r10_bio->devs[i].devnum;
		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
		atomic_inc(&r10_bio->remaining);
		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);

		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		generic_make_request(tbio);
	}

	/* Now write out to any replacement devices
	 * that are active
	 */
	for (i = 0; i < conf->copies; i++) {
		int j, d;

		tbio = r10_bio->devs[i].repl_bio;
		if (!tbio || !tbio->bi_end_io)
			continue;
		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
		    && r10_bio->devs[i].bio != fbio)
			for (j = 0; j < vcnt; j++)
				memcpy(page_address(tbio->bi_io_vec[j].bv_page),
				       page_address(fbio->bi_io_vec[j].bv_page),
				       PAGE_SIZE);
		d = r10_bio->devs[i].devnum;
		atomic_inc(&r10_bio->remaining);
		md_sync_acct(conf->mirrors[d].replacement->bdev,
			     tbio->bi_size >> 9);
		generic_make_request(tbio);
	}

done:
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		md_done_sync(mddev, r10_bio->sectors, 1);
		put_buf(r10_bio);
	}
}

/*
 * Now for the recovery code.
 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choosing a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use, the first for reading and
 * the second for writing.
 */
static void fix_recovery_read_error(struct r10bio *r10_bio)
{
	/* We got a read error during recovery.
	 * We repeat the read in smaller page-sized sections.
	 * If a read succeeds, write it to the new device or record
	 * a bad block if we cannot.
	 * If a read fails, record a bad block on both old and
	 * new devices.
	 */
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	struct bio *bio = r10_bio->devs[0].bio;
	sector_t sect = 0;
	int sectors = r10_bio->sectors;
	int idx = 0;
	int dr = r10_bio->devs[0].devnum;
	int dw = r10_bio->devs[1].devnum;

	while (sectors) {
		int s = sectors;
		struct md_rdev *rdev;
		sector_t addr;
		int ok;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		rdev = conf->mirrors[dr].rdev;
		addr = r10_bio->devs[0].addr + sect;
		ok = sync_page_io(rdev,
				  addr,
				  s << 9,
				  bio->bi_io_vec[idx].bv_page,
				  READ, false);
		if (ok) {
			rdev = conf->mirrors[dw].rdev;
			addr = r10_bio->devs[1].addr + sect;
			ok = sync_page_io(rdev,
					  addr,
					  s << 9,
					  bio->bi_io_vec[idx].bv_page,
					  WRITE, false);
			if (!ok) {
				set_bit(WriteErrorSeen, &rdev->flags);
				if (!test_and_set_bit(WantReplacement,
						      &rdev->flags))
					set_bit(MD_RECOVERY_NEEDED,
						&rdev->mddev->recovery);
			}
		}
		if (!ok) {
			/* We don't worry if we cannot set a bad block -
			 * it really is bad so there is no loss in not
			 * recording it yet
			 */
			rdev_set_badblocks(rdev, addr, s, 0);

			if (rdev != conf->mirrors[dw].rdev) {
				/* need bad block on destination too */
				struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
				addr = r10_bio->devs[1].addr + sect;
				ok = rdev_set_badblocks(rdev2, addr, s, 0);
				if (!ok) {
					/* just abort the recovery */
					printk(KERN_NOTICE
					       "md/raid10:%s: recovery aborted"
					       " due to read error\n",
					       mdname(mddev));

					conf->mirrors[dw].recovery_disabled
						= mddev->recovery_disabled;
					set_bit(MD_RECOVERY_INTR,
						&mddev->recovery);
					break;
				}
			}
		}

		sectors -= s;
		sect += s;
		idx++;
	}
}

static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
	struct r10conf *conf = mddev->private;
	int d;
	struct bio *wbio, *wbio2;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
		fix_recovery_read_error(r10_bio);
		end_sync_request(r10_bio);
		return;
	}

	/*
	 * share the pages with the first bio
	 * and submit the write request
	 */
	d = r10_bio->devs[1].devnum;
	wbio = r10_bio->devs[1].bio;
	wbio2 = r10_bio->devs[1].repl_bio;
	if (wbio->bi_end_io) {
		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
		generic_make_request(wbio);
	}
	if (wbio2 && wbio2->bi_end_io) {
		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
		md_sync_acct(conf->mirrors[d].replacement->bdev,
			     wbio2->bi_size >> 9);
		generic_make_request(wbio2);
	}
}


/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 *
 */
static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	struct timespec cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	ktime_get_ts(&cur_time_mon);

	if (rdev->last_read_error.tv_sec == 0 &&
	    rdev->last_read_error.tv_nsec == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;

	rdev->last_read_error = cur_time_mon;
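
	/* e.g. a last error three hours ago halves the count three
	 * times: read_errors is divided by 2^3 == 8 below.
	 */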

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}

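/* Try one synchronous page-sized IO against 'rdev'.  Returns 1 on
 * success, 0 on failure (after recording a bad block or failing the
 * device), and -1 if the range overlaps a known bad block so the IO
 * should not be attempted at all.
 */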
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
			    int sectors, struct page *page, int rw)
{
	sector_t first_bad;
	int bad_sectors;

	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
	    && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
		return -1;
	if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
		/* success */
		return 1;
	if (rw == WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED,
				&rdev->mddev->recovery);
	}
	/* need to record an error - either for the block or the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems encounter.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
{
	int sect = 0; /* Offset from r10_bio->sector */
	int sectors = r10_bio->sectors;
	struct md_rdev *rdev;
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
	int d = r10_bio->devs[r10_bio->read_slot].devnum;

	/* still own a reference to this rdev, so it cannot
	 * have been cleared recently.
	 */
	rdev = conf->mirrors[d].rdev;

	if (test_bit(Faulty, &rdev->flags))
		/* drive has already been failed, just ignore any
		   more fix_read_error() attempts */
		return;

	check_decay_read_errors(mddev, rdev);
	atomic_inc(&rdev->read_errors);
	if (atomic_read(&rdev->read_errors) > max_read_errors) {
		char b[BDEVNAME_SIZE];
		bdevname(rdev->bdev, b);

		printk(KERN_NOTICE
		       "md/raid10:%s: %s: Raid device exceeded "
		       "read_error threshold [cur %d:max %d]\n",
		       mdname(mddev), b,
		       atomic_read(&rdev->read_errors), max_read_errors);
		printk(KERN_NOTICE
		       "md/raid10:%s: %s: Failing raid device\n",
		       mdname(mddev), b);
		md_error(mddev, conf->mirrors[d].rdev);
		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
		return;
	}

	while(sectors) {
		int s = sectors;
		int sl = r10_bio->read_slot;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
		do {
			sector_t first_bad;
			int bad_sectors;

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Unmerged, &rdev->flags) &&
			    test_bit(In_sync, &rdev->flags) &&
			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				success = sync_page_io(rdev,
						       r10_bio->devs[sl].addr +
						       sect,
						       s<<9,
						       conf->tmppage, READ, false);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
				if (success)
					break;
			}
			sl++;
			if (sl == conf->copies)
				sl = 0;
		} while (!success && sl != r10_bio->read_slot);
		rcu_read_unlock();

		if (!success) {
			/* Cannot read from anywhere, just mark the block
			 * as bad on the first device to discourage future
			 * reads.
			 */
			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
			rdev = conf->mirrors[dn].rdev;

			if (!rdev_set_badblocks(
				    rdev,
				    r10_bio->devs[r10_bio->read_slot].addr
				    + sect,
				    s, 0)) {
				md_error(mddev, rdev);
				r10_bio->devs[r10_bio->read_slot].bio
					= IO_BLOCKED;
			}
			break;
		}

		start = sl;
		/* write it back and re-read */
		rcu_read_lock();
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (!rdev ||
			    test_bit(Unmerged, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				continue;

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			if (r10_sync_page_io(rdev,
					     r10_bio->devs[sl].addr +
					     sect,
					     s<<9, conf->tmppage, WRITE)
			    == 0) {
				/* Well, this device is dead */
				printk(KERN_NOTICE
				       "md/raid10:%s: read correction "
				       "write failed"
				       " (%d sectors at %llu on %s)\n",
				       mdname(mddev), s,
				       (unsigned long long)(
					       sect +
					       choose_data_offset(r10_bio,
								  rdev)),
				       bdevname(rdev->bdev, b));
				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
				       "drive\n",
				       mdname(mddev),
				       bdevname(rdev->bdev, b));
			}
			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
		sl = start;
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (!rdev ||
			    !test_bit(In_sync, &rdev->flags))
				continue;

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			switch (r10_sync_page_io(rdev,
					     r10_bio->devs[sl].addr +
					     sect,
					     s<<9, conf->tmppage,
						 READ)) {
			case 0:
				/* Well, this device is dead */
				printk(KERN_NOTICE
				       "md/raid10:%s: unable to read back "
				       "corrected sectors"
				       " (%d sectors at %llu on %s)\n",
				       mdname(mddev), s,
				       (unsigned long long)(
					       sect +
					       choose_data_offset(r10_bio, rdev)),
				       bdevname(rdev->bdev, b));
				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
				       "drive\n",
				       mdname(mddev),
				       bdevname(rdev->bdev, b));
				break;
			case 1:
				printk(KERN_INFO
				       "md/raid10:%s: read error corrected"
				       " (%d sectors at %llu on %s)\n",
				       mdname(mddev), s,
				       (unsigned long long)(
					       sect +
					       choose_data_offset(r10_bio, rdev)),
				       bdevname(rdev->bdev, b));
				atomic_add(s, &rdev->corrected_errors);
			}

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
		rcu_read_unlock();

		sectors -= s;
		sect += s;
	}
}

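/* Minimal local synchronous-submit helpers: bi_complete() wakes the
 * completion that submit_bio_wait() sleeps on, and the return value
 * is the bio's BIO_UPTODATE bit.  (Later kernels provide a generic
 * submit_bio_wait() in the block layer.)
 */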
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

static int submit_bio_wait(int rw, struct bio *bio)
{
	struct completion event;
	rw |= REQ_SYNC;

	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	return test_bit(BIO_UPTODATE, &bio->bi_flags);
}

static int narrow_write_error(struct r10bio *r10_bio, int i)
{
	struct bio *bio = r10_bio->master_bio;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
	/* bio has the data to be written to slot 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this.
	 *
	 * We currently own a reference to the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r10_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = 1 << rdev->badblocks.shift;
	sector = r10_bio->sector;
	sectors = ((r10_bio->sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;
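
	/* 'sectors' is the distance from 'sector' to the next
	 * badblock-sized boundary: e.g. with block_sectors == 8 and
	 * sector == 1003, ((1003 + 8) & ~7) - 1003 == 5, so the first
	 * clone covers five sectors and later ones a full block each.
	 */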

	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors' */
		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		md_trim_bio(wbio, sector - bio->bi_sector, sectors);
		wbio->bi_sector = (r10_bio->devs[i].addr+
				   choose_data_offset(r10_bio, rdev) +
				   (sector - r10_bio->sector));
		wbio->bi_bdev = rdev->bdev;
		if (submit_bio_wait(WRITE, wbio) == 0)
			/* Failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
{
	int slot = r10_bio->read_slot;
	struct bio *bio;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
	char b[BDEVNAME_SIZE];
	unsigned long do_sync;
	int max_sectors;

	/* we got a read error. Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write it and check
	 * that this fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen.
	 */
	bio = r10_bio->devs[slot].bio;
	bdevname(bio->bi_bdev, b);
	bio_put(bio);
	r10_bio->devs[slot].bio = NULL;

	if (mddev->ro == 0) {
		freeze_array(conf);
		fix_read_error(conf, mddev, r10_bio);
		unfreeze_array(conf);
	} else
		r10_bio->devs[slot].bio = IO_BLOCKED;

	rdev_dec_pending(rdev, mddev);

read_more:
	rdev = read_balance(conf, r10_bio, &max_sectors);
	if (rdev == NULL) {
		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b,
		       (unsigned long long)r10_bio->sector);
		raid_end_bio_io(r10_bio);
		return;
	}

	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
	slot = r10_bio->read_slot;
	printk_ratelimited(
		KERN_ERR
		"md/raid10:%s: %s: redirecting"
		"sector %llu to another mirror\n",
		mdname(mddev),
		bdevname(rdev->bdev, b),
		(unsigned long long)r10_bio->sector);
	bio = bio_clone_mddev(r10_bio->master_bio,
			      GFP_NOIO, mddev);
	md_trim_bio(bio,
		    r10_bio->sector - bio->bi_sector,
		    max_sectors);
	r10_bio->devs[slot].bio = bio;
	r10_bio->devs[slot].rdev = rdev;
	bio->bi_sector = r10_bio->devs[slot].addr
		+ choose_data_offset(r10_bio, rdev);
	bio->bi_bdev = rdev->bdev;
	bio->bi_rw = READ | do_sync;
	bio->bi_private = r10_bio;
	bio->bi_end_io = raid10_end_read_request;
	if (max_sectors < r10_bio->sectors) {
		/* Drat - have to split this up more */
		struct bio *mbio = r10_bio->master_bio;
		int sectors_handled =
			r10_bio->sector + max_sectors
			- mbio->bi_sector;
		r10_bio->sectors = max_sectors;
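		/* ->bi_phys_segments is borrowed as a count of the
		 * r10_bios carved out of this master bio; zero means
		 * a single, unsplit request.
		 */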
		spin_lock_irq(&conf->device_lock);
		if (mbio->bi_phys_segments == 0)
			mbio->bi_phys_segments = 2;
		else
			mbio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
		generic_make_request(bio);

		r10_bio = mempool_alloc(conf->r10bio_pool,
					GFP_NOIO);
		r10_bio->master_bio = mbio;
		r10_bio->sectors = (mbio->bi_size >> 9)
			- sectors_handled;
		r10_bio->state = 0;
		set_bit(R10BIO_ReadError,
			&r10_bio->state);
		r10_bio->mddev = mddev;
		r10_bio->sector = mbio->bi_sector
			+ sectors_handled;

		goto read_more;
	} else
		generic_make_request(bio);
}

static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
{
	/* Some sort of write request has finished and it
	 * succeeded in writing where we thought there was a
	 * bad block.  So forget the bad block.
	 * Or possibly it failed and we need to record
	 * a bad block.
	 */
	int m;
	struct md_rdev *rdev;

	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
		for (m = 0; m < conf->copies; m++) {
			int dev = r10_bio->devs[m].devnum;
			rdev = conf->mirrors[dev].rdev;
			if (r10_bio->devs[m].bio == NULL)
				continue;
			if (test_bit(BIO_UPTODATE,
				     &r10_bio->devs[m].bio->bi_flags)) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
			} else {
				if (!rdev_set_badblocks(
					    rdev,
					    r10_bio->devs[m].addr,
					    r10_bio->sectors, 0))
					md_error(conf->mddev, rdev);
			}
			rdev = conf->mirrors[dev].replacement;
			if (r10_bio->devs[m].repl_bio == NULL)
				continue;
			if (test_bit(BIO_UPTODATE,
				     &r10_bio->devs[m].repl_bio->bi_flags)) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
			} else {
				if (!rdev_set_badblocks(
					    rdev,
					    r10_bio->devs[m].addr,
					    r10_bio->sectors, 0))
					md_error(conf->mddev, rdev);
			}
		}
		put_buf(r10_bio);
	} else {
		for (m = 0; m < conf->copies; m++) {
			int dev = r10_bio->devs[m].devnum;
			struct bio *bio = r10_bio->devs[m].bio;
			rdev = conf->mirrors[dev].rdev;
			if (bio == IO_MADE_GOOD) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
				rdev_dec_pending(rdev, conf->mddev);
			} else if (bio != NULL &&
				   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
				if (!narrow_write_error(r10_bio, m)) {
					md_error(conf->mddev, rdev);
					set_bit(R10BIO_Degraded,
						&r10_bio->state);
				}
				rdev_dec_pending(rdev, conf->mddev);
			}
			bio = r10_bio->devs[m].repl_bio;
			rdev = conf->mirrors[dev].replacement;
			if (rdev && bio == IO_MADE_GOOD) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}
		if (test_bit(R10BIO_WriteError,
			     &r10_bio->state))
			close_write(r10_bio);
		raid_end_bio_io(r10_bio);
	}
}

static void raid10d(struct mddev *mddev)
{
	struct r10bio *r10_bio;
	unsigned long flags;
	struct r10conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
		    test_bit(R10BIO_WriteError, &r10_bio->state))
			handle_write_completed(conf, r10_bio);
		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
			handle_read_error(mddev, r10_bio);
		else {
			/* just a partial read to be scheduled from a
			 * separate context
			 */
			int slot = r10_bio->read_slot;
			generic_make_request(r10_bio->devs[slot].bio);
		}

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}


static int init_resync(struct r10conf *conf)
{
	int buffs;
	int i;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r10buf_pool);
	conf->have_replacement = 0;
	for (i = 0; i < conf->geo.raid_disks; i++)
		if (conf->mirrors[i].replacement)
			conf->have_replacement = 1;
	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
	if (!conf->r10buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences.  If only one copy is live,
 * skip it.
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device.  We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we setup these structures, we collect all bio's together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to generic_make_request.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining.  When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 * is complete.
 *
 */

static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
			     int *skipped, int go_faster)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	struct bio *biolist = NULL, *bio;
	sector_t max_sector, nr_sectors;
	int i;
	int max_sync;
	sector_t sync_blocks;
	sector_t sectors_skipped = 0;
	int chunks_skipped = 0;
	sector_t chunk_mask = conf->geo.chunk_mask;

	if (!conf->r10buf_pool)
		if (init_resync(conf))
			return 0;

 skipped:
	max_sector = mddev->dev_sectors;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sector = mddev->resync_max_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices),
		 * as we may have started syncing them but not finished.
		 * We can find the current address in
		 * mddev->curr_resync, but for recovery,
		 * we need to convert that to several
		 * virtual addresses.
		 */
		if (mddev->curr_resync < max_sector) { /* aborted */
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
			else for (i = 0; i < conf->geo.raid_disks; i++) {
				sector_t sect =
					raid10_find_virt(conf, mddev->curr_resync, i);
				bitmap_end_sync(mddev->bitmap, sect,
						&sync_blocks, 1);
			}
		} else {
			/* completed sync */
			if ((!mddev->bitmap || conf->fullsync)
			    && conf->have_replacement
			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
				/* Completed a full sync so the replacements
				 * are now fully recovered.
				 */
				for (i = 0; i < conf->geo.raid_disks; i++)
					if (conf->mirrors[i].replacement)
						conf->mirrors[i].replacement
							->recovery_offset
							= MaxSector;
			}
			conf->fullsync = 0;
		}
		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		*skipped = 1;
		return sectors_skipped;
	}
	if (chunks_skipped >= conf->geo.raid_disks) {
		/* if there has been nothing to do on any drive,
		 * then there is nothing to do at all..
		 */
		*skipped = 1;
		return (max_sector - sector_nr) + sectors_skipped;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */

	/* make sure whole request will fit in a chunk - if chunks
	 * are meaningful
	 */
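	/* e.g. with 64KiB chunks (chunk_mask == 127 sectors) and
	 * sector_nr == 1000, (sector_nr | chunk_mask) + 1 == 1024,
	 * the first sector of the next chunk.
	 */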
	if (conf->geo.near_copies < conf->geo.raid_disks &&
	    max_sector > (sector_nr | chunk_mask))
		max_sector = (sector_nr | chunk_mask) + 1;
	/*
	 * If there is non-resync activity waiting for us then
	 * put in a delay to throttle resync.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	/* Again, very different code for resync and recovery.
	 * Both must result in an r10bio with a list of bios that
	 * have bi_end_io, bi_sector, bi_bdev set,
	 * and bi_private set to the r10bio.
	 * For recovery, we may actually create several r10bios
	 * with 2 bios in each, that correspond to the bios in the main one.
	 * In this case, the subordinate r10bios link back through a
	 * borrowed master_bio pointer, and the counter in the master
	 * includes a ref from each subordinate.
	 */
	/* First, we decide what to do and set ->bi_end_io
	 * To end_sync_read if we want to read, and
	 * end_sync_write if we will want to write.
	 */

	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* recovery... the complicated one */
		int j;
		r10_bio = NULL;

		for (i = 0 ; i < conf->geo.raid_disks; i++) {
			int still_degraded;
			struct r10bio *rb2;
			sector_t sect;
			int must_sync;
			int any_working;
			struct mirror_info *mirror = &conf->mirrors[i];

			if ((mirror->rdev == NULL ||
			     test_bit(In_sync, &mirror->rdev->flags))
			    &&
			    (mirror->replacement == NULL ||
			     test_bit(Faulty,
				      &mirror->replacement->flags)))
				continue;

			still_degraded = 0;
			/* want to reconstruct this device */
			rb2 = r10_bio;
			sect = raid10_find_virt(conf, sector_nr, i);
			/* Unless we are doing a full sync, or a replacement
			 * we only need to recover the block if it is set in
			 * the bitmap
			 */
			must_sync = bitmap_start_sync(mddev->bitmap, sect,
						      &sync_blocks, 1);
			if (sync_blocks < max_sync)
				max_sync = sync_blocks;
			if (!must_sync &&
			    mirror->replacement == NULL &&
			    !conf->fullsync) {
				/* yep, skip the sync_blocks here, but don't assume
				 * that there will never be anything to do here
				 */
				chunks_skipped = -1;
				continue;
			}

			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
			raise_barrier(conf, rb2 != NULL);
			atomic_set(&r10_bio->remaining, 0);

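			/* Chain subordinate r10_bios through the
			 * borrowed master_bio pointer (see the big
			 * comment above sync_request()).
			 */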
			r10_bio->master_bio = (struct bio*)rb2;
			if (rb2)
				atomic_inc(&rb2->remaining);
			r10_bio->mddev = mddev;
			set_bit(R10BIO_IsRecover, &r10_bio->state);
			r10_bio->sector = sect;

			raid10_find_phys(conf, r10_bio);

			/* Need to check if the array will still be
			 * degraded
			 */
			for (j = 0; j < conf->geo.raid_disks; j++)
				if (conf->mirrors[j].rdev == NULL ||
				    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
					still_degraded = 1;
					break;
				}

			must_sync = bitmap_start_sync(mddev->bitmap, sect,
						      &sync_blocks, still_degraded);

			any_working = 0;
			for (j=0; j<conf->copies;j++) {
				int k;
				int d = r10_bio->devs[j].devnum;
				sector_t from_addr, to_addr;
				struct md_rdev *rdev;
				sector_t sector, first_bad;
				int bad_sectors;
				if (!conf->mirrors[d].rdev ||
				    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
					continue;
				/* This is where we read from */
				any_working = 1;
				rdev = conf->mirrors[d].rdev;
				sector = r10_bio->devs[j].addr;

				if (is_badblock(rdev, sector, max_sync,
						&first_bad, &bad_sectors)) {
					if (first_bad > sector)
						max_sync = first_bad - sector;
					else {
						bad_sectors -= (sector
								- first_bad);
						if (max_sync > bad_sectors)
							max_sync = bad_sectors;
						continue;
					}
				}
				bio = r10_bio->devs[0].bio;
				bio->bi_next = biolist;
				biolist = bio;
				bio->bi_private = r10_bio;
				bio->bi_end_io = end_sync_read;
				bio->bi_rw = READ;
				from_addr = r10_bio->devs[j].addr;
				bio->bi_sector = from_addr + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				atomic_inc(&rdev->nr_pending);
				/* and we write to 'i' (if not in_sync) */

				for (k=0; k<conf->copies; k++)
					if (r10_bio->devs[k].devnum == i)
						break;
				BUG_ON(k == conf->copies);
				to_addr = r10_bio->devs[k].addr;
				r10_bio->devs[0].devnum = d;
				r10_bio->devs[0].addr = from_addr;
				r10_bio->devs[1].devnum = i;
				r10_bio->devs[1].addr = to_addr;

				rdev = mirror->rdev;
				if (!test_bit(In_sync, &rdev->flags)) {
					bio = r10_bio->devs[1].bio;
					bio->bi_next = biolist;
					biolist = bio;
					bio->bi_private = r10_bio;
					bio->bi_end_io = end_sync_write;
					bio->bi_rw = WRITE;
					bio->bi_sector = to_addr
						+ rdev->data_offset;
					bio->bi_bdev = rdev->bdev;
					atomic_inc(&r10_bio->remaining);
				} else
					r10_bio->devs[1].bio->bi_end_io = NULL;

				/* and maybe write to replacement */
				bio = r10_bio->devs[1].repl_bio;
				if (bio)
					bio->bi_end_io = NULL;
				rdev = mirror->replacement;
				/* Note: if rdev != NULL, then bio
				 * cannot be NULL as r10buf_pool_alloc will
				 * have allocated it.
				 * So the second test here is pointless.
				 * But it keeps semantic-checkers happy, and
				 * this comment keeps human reviewers
				 * happy.
				 */
				if (rdev == NULL || bio == NULL ||
				    test_bit(Faulty, &rdev->flags))
					break;
				bio->bi_next = biolist;
				biolist = bio;
				bio->bi_private = r10_bio;
				bio->bi_end_io = end_sync_write;
				bio->bi_rw = WRITE;
				bio->bi_sector = to_addr + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				atomic_inc(&r10_bio->remaining);
				break;
			}
			if (j == conf->copies) {
				/* Cannot recover, so abort the recovery or
				 * record a bad block */
				put_buf(r10_bio);
				if (rb2)
					atomic_dec(&rb2->remaining);
				r10_bio = rb2;
				if (any_working) {
					/* problem is that there are bad blocks
					 * on other device(s)
					 */
					int k;
					for (k = 0; k < conf->copies; k++)
						if (r10_bio->devs[k].devnum == i)
							break;
					if (!test_bit(In_sync,
						      &mirror->rdev->flags)
					    && !rdev_set_badblocks(
						    mirror->rdev,
						    r10_bio->devs[k].addr,
						    max_sync, 0))
						any_working = 0;
					if (mirror->replacement &&
					    !rdev_set_badblocks(
						    mirror->replacement,
						    r10_bio->devs[k].addr,
						    max_sync, 0))
						any_working = 0;
				}
				if (!any_working)  {
					if (!test_and_set_bit(MD_RECOVERY_INTR,
							      &mddev->recovery))
						printk(KERN_INFO "md/raid10:%s: insufficient "
						       "working devices for recovery.\n",
						       mdname(mddev));
					mirror->recovery_disabled
						= mddev->recovery_disabled;
				}
				break;
			}
		}
		if (biolist == NULL) {
			while (r10_bio) {
				struct r10bio *rb2 = r10_bio;
				r10_bio = (struct r10bio*) rb2->master_bio;
				rb2->master_bio = NULL;
				put_buf(rb2);
			}
			goto giveup;
		}
	} else {
		/* resync. Schedule a read for every block at this virt offset */
		int count = 0;

		bitmap_cond_end_sync(mddev->bitmap, sector_nr);

		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
				       &sync_blocks, mddev->degraded) &&
		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
						 &mddev->recovery)) {
			/* We can skip this block */
			*skipped = 1;
			return sync_blocks + sectors_skipped;
		}
		if (sync_blocks < max_sync)
			max_sync = sync_blocks;
		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);

		r10_bio->mddev = mddev;
		atomic_set(&r10_bio->remaining, 0);
		raise_barrier(conf, 0);
		conf->next_resync = sector_nr;

		r10_bio->master_bio = NULL;
		r10_bio->sector = sector_nr;
		set_bit(R10BIO_IsSync, &r10_bio->state);
		raid10_find_phys(conf, r10_bio);
		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;

		for (i = 0; i < conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			sector_t first_bad, sector;
			int bad_sectors;

			if (r10_bio->devs[i].repl_bio)
				r10_bio->devs[i].repl_bio->bi_end_io = NULL;

			bio = r10_bio->devs[i].bio;
			bio->bi_end_io = NULL;
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
				continue;
			sector = r10_bio->devs[i].addr;
			if (is_badblock(conf->mirrors[d].rdev,
					sector, max_sync,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector)
					max_sync = first_bad - sector;
				else {
					bad_sectors -= (sector - first_bad);
					if (max_sync > bad_sectors)
						max_sync = bad_sectors;
					continue;
				}
			}
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;
			bio->bi_rw = READ;
			bio->bi_sector = sector +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
			count++;

			if (conf->mirrors[d].replacement == NULL ||
			    test_bit(Faulty,
				     &conf->mirrors[d].replacement->flags))
				continue;

			/* Need to set up for writing to the replacement */
			bio = r10_bio->devs[i].repl_bio;
			clear_bit(BIO_UPTODATE, &bio->bi_flags);

			sector = r10_bio->devs[i].addr;
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_write;
			bio->bi_rw = WRITE;
			bio->bi_sector = sector +
				conf->mirrors[d].replacement->data_offset;
			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
			count++;
		}

		if (count < 2) {
			for (i=0; i<conf->copies; i++) {
				int d = r10_bio->devs[i].devnum;
				if (r10_bio->devs[i].bio->bi_end_io)
					rdev_dec_pending(conf->mirrors[d].rdev,
							 mddev);
				if (r10_bio->devs[i].repl_bio &&
				    r10_bio->devs[i].repl_bio->bi_end_io)
					rdev_dec_pending(
						conf->mirrors[d].replacement,
						mddev);
			}
			put_buf(r10_bio);
			biolist = NULL;
			goto giveup;
		}
	}
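
	/* Reset every bio on the list to an empty state before pages
	 * are added below: keep only the pool bits in bi_flags, and
	 * mark a bio up-to-date only if it will actually be used.
	 */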

	for (bio = biolist; bio ; bio=bio->bi_next) {

		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		if (bio->bi_end_io)
			bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
	}

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio= biolist ; bio ; bio=bio->bi_next) {
			struct bio *bio2;
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0))
				continue;

			/* stop here */
			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
			for (bio2 = biolist;
			     bio2 && bio2 != bio;
			     bio2 = bio2->bi_next) {
				/* remove last page from this bio */
				bio2->bi_vcnt--;
				bio2->bi_size -= len;
				bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
			}
			goto bio_full;
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r10_bio->sectors = nr_sectors;

	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			generic_make_request(bio);
		}
	}

	if (sectors_skipped)
		/* pretend they weren't skipped, it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;
 giveup:
	/* There is nowhere to write: all non-sync
	 * drives must be failed, in resync, or have a bad
	 * block here, so try the next chunk...
	 */
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;

	sectors_skipped += (max_sector - sector_nr);
	chunks_skipped ++;
	sector_nr = max_sector;
	goto skipped;
}

static sector_t
raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t size;
	struct r10conf *conf = mddev->private;

	if (!raid_disks)
		raid_disks = conf->geo.raid_disks;
	if (!sectors)
		sectors = conf->dev_sectors;

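	/* e.g. raid_disks == 4, near_copies == 2, far_copies == 1 and
	 * 1TiB per device: the array exports 4/2 == 2TiB of data
	 * (numbers purely illustrative).
	 */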
	size = sectors >> conf->geo.chunk_shift;
	sector_div(size, conf->geo.far_copies);
	size = size * raid_disks;
	sector_div(size, conf->geo.near_copies);

	return size << conf->geo.chunk_shift;
}

static void calc_sectors(struct r10conf *conf, sector_t size)
{
	/* Calculate the number of sectors-per-device that will
	 * actually be used, and set conf->dev_sectors and
	 * conf->geo.stride.
	 */

	size = size >> conf->geo.chunk_shift;
	sector_div(size, conf->geo.far_copies);
	size = size * conf->geo.raid_disks;
	sector_div(size, conf->geo.near_copies);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" */
	size = size * conf->copies;

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);

	conf->dev_sectors = size << conf->geo.chunk_shift;

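	/* 'stride' separates successive far copies on each device: a
	 * single chunk when far_offset is set, otherwise one whole
	 * far_copies-sized section of the device.
	 */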
	if (conf->geo.far_offset)
		conf->geo.stride = 1 << conf->geo.chunk_shift;
	else {
		sector_div(size, conf->geo.far_copies);
		conf->geo.stride = size << conf->geo.chunk_shift;
	}
}

static struct r10conf *setup_conf(struct mddev *mddev)
{
	struct r10conf *conf = NULL;
	int nc, fc, fo;
	int err = -EINVAL;

	if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
	    !is_power_of_2(mddev->new_chunk_sectors)) {
		printk(KERN_ERR "md/raid10:%s: chunk size must be "
		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
		       mdname(mddev), PAGE_SIZE);
		goto out;
	}

	nc = mddev->new_layout & 255;
	fc = (mddev->new_layout >> 8) & 255;
	fo = mddev->new_layout & (1<<16);

	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
	    (mddev->new_layout >> 17)) {
		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
		       mdname(mddev), mddev->new_layout);
		goto out;
	}

	err = -ENOMEM;
	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
	if (!conf)
		goto out;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out;

L
Linus Torvalds 已提交
3295

3296 3297 3298
	conf->geo.raid_disks = mddev->raid_disks;
	conf->geo.near_copies = nc;
	conf->geo.far_copies = fc;
L
Linus Torvalds 已提交
3299
	conf->copies = nc*fc;
3300 3301 3302
	conf->geo.far_offset = fo;
	conf->geo.chunk_mask = mddev->new_chunk_sectors - 1;
	conf->geo.chunk_shift = ffz(~mddev->new_chunk_sectors);
3303 3304 3305 3306 3307 3308

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	if (!conf->r10bio_pool)
		goto out;

3309
	calc_sectors(conf, mddev->dev_sectors);
3310 3311
	conf->prev = conf->geo;
	conf->reshape_progress = MaxSector;
L
Linus Torvalds 已提交
3312

3313
	spin_lock_init(&conf->device_lock);
3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	conf->thread = md_register_thread(raid10d, mddev, NULL);
	if (!conf->thread)
		goto out;

	conf->mddev = mddev;
	return conf;

 out:
N
NeilBrown 已提交
3327
	printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338
	       mdname(mddev));
	if (conf) {
		if (conf->r10bio_pool)
			mempool_destroy(conf->r10bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf);
	}
	return ERR_PTR(err);
}
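
/*
 * Layout decoding examples (illustrative; derived from the masks used in
 * setup_conf() above):
 *   0x102   -> nc = 2, fc = 1, fo = 0   ("near=2", classic RAID1+0)
 *   0x201   -> nc = 1, fc = 2, fo = 0   ("far=2")
 *   0x10201 -> nc = 1, fc = 2, fo set   ("offset=2")
 */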

static int run(struct mddev *mddev)
{
	struct r10conf *conf;
	int i, disk_idx, chunk_size;
	struct mirror_info *disk;
	struct md_rdev *rdev;
	sector_t size;

	/*
	 * copy the already verified devices into our private RAID10
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */

	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
		if (IS_ERR(conf))
			return PTR_ERR(conf);
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!conf)
		goto out;

	mddev->thread = conf->thread;
	conf->thread = NULL;

	chunk_size = mddev->chunk_sectors << 9;
	blk_queue_io_min(mddev->queue, chunk_size);
	if (conf->geo.raid_disks % conf->geo.near_copies)
		blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
	else
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->geo.raid_disks / conf->geo.near_copies));
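	/*
	 * Illustrative numbers (assumed, not from the source): with
	 * raid_disks == 4, near_copies == 2 and 512KiB chunks, io_opt is
	 * 512KiB * (4 / 2) = 1MiB, one full stripe of distinct data.
	 */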

	rdev_for_each(rdev, mddev) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0)
			continue;
		if (disk_idx >= conf->geo.raid_disks &&
		    disk_idx >= conf->prev.raid_disks)
			continue;
		disk = conf->mirrors + disk_idx;

		if (test_bit(Replacement, &rdev->flags)) {
			if (disk->replacement)
				goto out_free_conf;
			disk->replacement = rdev;
		} else {
			if (disk->rdev)
				goto out_free_conf;
			disk->rdev = rdev;
		}

		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		disk->head_position = 0;
	}
	/* need to check that every block has at least one working mirror */
	if (!enough(conf, -1)) {
		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0;
	     i < conf->geo.raid_disks
		     || i < conf->prev.raid_disks;
	     i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev && disk->replacement) {
			/* The replacement is all we have - use it */
			disk->rdev = disk->replacement;
			disk->replacement = NULL;
			clear_bit(Replacement, &disk->rdev->flags);
		}

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			if (disk->rdev)
				conf->fullsync = 1;
		}
		disk->recovery_disabled = mddev->recovery_disabled - 1;
	}

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid10:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"md/raid10:%s: active with %d out of %d devices\n",
		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
		conf->geo.raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;

	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/* Calculate max read-ahead size.
	 * We need to readahead at least twice a whole stripe....
	 * maybe...
	 */
	{
		int stripe = conf->geo.raid_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->geo.near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
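	/*
	 * Illustrative numbers (assumed): 4 disks, near_copies == 2,
	 * 512KiB chunks and 4KiB pages give stripe = 4 * 128 / 2 = 256
	 * pages, so read-ahead is raised to at least 512 pages (2MiB),
	 * i.e. two full stripes.
	 */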

	blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	md_unregister_thread(&mddev->thread);
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static int stop(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;

	raise_barrier(conf, 0);
	lower_barrier(conf);

	md_unregister_thread(&mddev->thread);
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static void raid10_quiesce(struct mddev *mddev, int state)
{
	struct r10conf *conf = mddev->private;

	switch(state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

static int raid10_resize(struct mddev *mddev, sector_t sectors)
{
	/* Resize of 'far' arrays is not supported.
	 * For 'near' and 'offset' arrays we can set the
	 * number of sectors used to be an appropriate multiple
	 * of the chunk size.
	 * For 'offset', this is far_copies*chunksize.
	 * For 'near' the multiplier is the LCM of
	 * near_copies and raid_disks.
	 * So if far_copies > 1 && !far_offset, fail.
	 * Else find LCM(raid_disks, near_copies)*far_copies and
	 * multiply by chunk_size.  Then round to this number.
	 * This is mostly done by raid10_size()
	 */
	struct r10conf *conf = mddev->private;
	sector_t oldsize, size;

	if (mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
		return -EINVAL;

	oldsize = raid10_size(mddev, 0, 0);
	size = raid10_size(mddev, sectors, 0);
	md_set_array_sectors(mddev, size);
	if (mddev->array_sectors > size)
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > oldsize) {
		mddev->recovery_cp = oldsize;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	calc_sectors(conf, sectors);
	mddev->dev_sectors = conf->dev_sectors;
	mddev->resync_max_sectors = size;
	return 0;
}
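
/*
 * Illustrative rounding (numbers assumed): for a 'near' array with
 * raid_disks == 4, near_copies == 2 and far_copies == 1, the usable size
 * must be a multiple of LCM(4, 2) * 1 = 4 chunks, so with 512KiB chunks
 * a requested size is effectively rounded down to a 2MiB boundary by
 * raid10_size() above.
 */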

static void *raid10_takeover_raid0(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r10conf *conf;

	if (mddev->degraded > 0) {
		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0)
				rdev->new_raid_disk = rdev->raid_disk * 2;
		conf->barrier = 1;
	}

	return conf;
}
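
/*
 * Illustrative mapping (derived from the assignments above): a 2-drive
 * raid0 becomes a 4-slot raid10 with layout (1<<8) + 2 == 0x102
 * ("near=2"); the old members move to slots 0 and 2, leaving the odd
 * slots degraded until mirror halves are recovered onto fresh drives.
 */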

static void *raid10_takeover(struct mddev *mddev)
{
	struct r0conf *raid0_conf;

	/* raid10 can take over:
	 *  raid0 - providing it has only one zone
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_conf = mddev->private;
		if (raid0_conf->nr_strip_zones > 1) {
			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
			       " with more than one zone.\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk = raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.resize		= raid10_resize,
	.takeover	= raid10_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
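
/*
 * Usage sketch (assuming the standard sysfs path that module_param()
 * creates for this module): max_queued_requests can be inspected and
 * tuned at runtime, e.g.
 *
 *   cat /sys/module/raid10/parameters/max_queued_requests
 *   echo 2048 > /sys/module/raid10/parameters/max_queued_requests
 */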