/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid10.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.
 * Each device is divided into far_copies sections.
 * In each section, chunks are laid out in a style similar to raid0, but
 * near_copies copies of each chunk are stored (each on a different drive).
 * The starting device for each section is offset near_copies from the starting
 * device of the previous section.
 * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
 * drive.
 * near_copies and far_copies must be at least one, and their product is at most
 * raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 */
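
/*
 * Worked example (added for illustration; the geometry values here are
 * assumed, not from the original source): with raid_disks=4,
 * near_copies=2, far_copies=1, data chunks A,B,C,D,... are laid out as
 *
 *     dev0  dev1  dev2  dev3
 *      A     A     B     B
 *      C     C     D     D
 *
 * i.e. raid0-style striping in which each chunk is also mirrored on the
 * adjacent drive.
 */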

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define	NR_RAID10_BIOS 256

static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	conf_t *conf = data;
	r10bio_t *r10_bio;
	int size = offsetof(struct r10bio_s, devs[conf->copies]);

	/* allocate a r10bio with room for raid_disks entries in the bios array */
	r10_bio = kzalloc(size, gfp_flags);
	if (!r10_bio && conf->mddev)
		unplug_slaves(conf->mddev);

	return r10_bio;
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
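/* illustrative note: 32*1024*1024 / (64*1024) == 512 concurrent requests at most */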

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	conf_t *conf = data;
	struct page *page;
	r10bio_t *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio) {
		unplug_slaves(conf->mddev);
		return NULL;
	}

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0 ; j < nalloc; j++) {
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < nalloc )
		bio_put(r10_bio->devs[j].bio);
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	conf_t *conf = data;
	r10bio_t *r10bio = __r10_bio;
	int j;

	for (j=0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = & r10_bio->devs[i].bio;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(r10bio_t *r10_bio)
{
	conf_t *conf = r10_bio->mddev->private;

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(r10bio_t *r10_bio)
{
	conf_t *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(r10bio_t *r10_bio)
{
	unsigned long flags;
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r10bio_t *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;

	bio_endio(bio,
		test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
	free_r10bio(r10_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, r10bio_t *r10_bio)
{
	conf_t *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
	int slot, dev;
	conf_t *conf = r10_bio->mddev->private;


	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
		raid_end_bio_io(r10_bio);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
		reschedule_retry(r10_bio);
	}

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}

static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
	int slot, dev;
	conf_t *conf = r10_bio->mddev->private;

	for (slot = 0; slot < conf->copies; slot++)
		if (r10_bio->devs[slot].bio == bio)
			break;
	dev = r10_bio->devs[slot].devnum;

	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
		/* an I/O failed, we can't clear the bitmap */
		set_bit(R10BIO_Degraded, &r10_bio->state);
	} else
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);

	update_head_pos(slot, r10_bio);

	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		/* clear the bitmap if all writes complete successfully */
		bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
				r10_bio->sectors,
				!test_bit(R10BIO_Degraded, &r10_bio->state),
				0);
		md_write_end(r10_bio->mddev);
		raid_end_bio_io(r10_bio);
	}

	rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}


/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */
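
/*
 * Worked example (added for illustration; the geometry values here are
 * assumed, not from the original source): with raid_disks=4,
 * near_copies=1, far_copies=2, chunks A,B,C,... give
 *
 *     dev0  dev1  dev2  dev3
 *      A     B     C     D    <- first 1/far_copies of each drive
 *      D     A     B     C    <- far copy, device offset by near_copies
 *
 * so a complete extra copy of the array sits further down the drives,
 * with every block on a different device than its first copy.
 */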

static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
{
	int n,f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;

	int slot = 0;

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> conf->chunk_shift;
	sector = r10bio->sector & conf->chunk_mask;

	chunk *= conf->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, conf->raid_disks);
	if (conf->far_offset)
		stripe *= conf->far_copies;

	sector += stripe << conf->chunk_shift;

	/* and calculate all the others */
	for (n=0; n < conf->near_copies; n++) {
		int d = dev;
		sector_t s = sector;
		r10bio->devs[slot].addr = sector;
		r10bio->devs[slot].devnum = d;
		slot++;

		for (f = 1; f < conf->far_copies; f++) {
			d += conf->near_copies;
			if (d >= conf->raid_disks)
				d -= conf->raid_disks;
			s += conf->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= conf->raid_disks) {
			dev = 0;
			sector += (conf->chunk_mask + 1);
		}
	}
	BUG_ON(slot != conf->copies);
}

static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;

	offset = sector & conf->chunk_mask;
	if (conf->far_offset) {
		int fc;
		chunk = sector >> conf->chunk_shift;
		fc = sector_div(chunk, conf->far_copies);
		dev -= fc * conf->near_copies;
		if (dev < 0)
			dev += conf->raid_disks;
	} else {
		while (sector >= conf->stride) {
			sector -= conf->stride;
			if (dev < conf->near_copies)
				dev += conf->raid_disks - conf->near_copies;
			else
				dev -= conf->near_copies;
		}
		chunk = sector >> conf->chunk_shift;
	}
	vchunk = chunk * conf->raid_disks + dev;
	sector_div(vchunk, conf->near_copies);
	return (vchunk << conf->chunk_shift) + offset;
}

/**
 *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 *      If near_copies == raid_disk, there are no striping issues,
 *      but in that case, the function isn't called at all.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0) max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
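
/*
 * Worked example (added for illustration): with chunk_sectors == 128
 * (64K chunks) and a bio currently ending at sector offset 120 within
 * its chunk, at most (128 - 120) << 9 == 4096 further bytes fit before
 * the request would cross a chunk boundary.
 */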

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because the position is mirror-based, not device-based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static int read_balance(conf_t *conf, r10bio_t *r10_bio)
{
	const unsigned long this_sector = r10_bio->sector;
	int disk, slot, nslot;
	const int sectors = r10_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync)) {
		/* make sure that disk is operational */
		slot = 0;
		disk = r10_bio->devs[slot].devnum;

		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
		       r10_bio->devs[slot].bio == IO_BLOCKED ||
		       !test_bit(In_sync, &rdev->flags)) {
			slot++;
			if (slot == conf->copies) {
				slot = 0;
				disk = -1;
				break;
			}
			disk = r10_bio->devs[slot].devnum;
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	slot = 0;
	disk = r10_bio->devs[slot].devnum;
	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
	       r10_bio->devs[slot].bio == IO_BLOCKED ||
	       !test_bit(In_sync, &rdev->flags)) {
		slot ++;
		if (slot == conf->copies) {
			disk = -1;
			goto rb_out;
		}
		disk = r10_bio->devs[slot].devnum;
	}


	current_distance = abs(r10_bio->devs[slot].addr -
			       conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest,
	 * or - for far > 1 - find the closest to partition beginning */

	for (nslot = slot; nslot < conf->copies; nslot++) {
		int ndisk = r10_bio->devs[nslot].devnum;


		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
		    r10_bio->devs[nslot].bio == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags))
			continue;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
			disk = ndisk;
			slot = nslot;
			break;
		}

		/* for far > 1 always use the lowest address */
		if (conf->far_copies > 1)
			new_distance = r10_bio->devs[nslot].addr;
		else
			new_distance = abs(r10_bio->devs[nslot].addr -
					   conf->mirrors[ndisk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			disk = ndisk;
			slot = nslot;
		}
	}

rb_out:
	r10_bio->read_slot = slot;
/*	conf->next_seq_sect = this_sector + sectors;*/

	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
	else
		disk = -1;
	rcu_read_unlock();

	return disk;
}

static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid10_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(q->queuedata);
	md_wakeup_thread(mddev->thread);
}

static int raid10_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;
	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}

static int flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 * We return 1 if any requests were actually submitted.
	 */
	int rv = 0;

	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
		rv = 1;
	} else
		spin_unlock_irq(&conf->device_lock);
	return rv;
}
/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
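
/*
 * Sketch of the intended pairing (added for illustration):
 *
 *	regular IO:	wait_barrier(conf);   ...IO...  allow_barrier(conf);
 *	background IO:	raise_barrier(conf, 0); ...IO... lower_barrier(conf);
 */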

static void raise_barrier(conf_t *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock,
			    raid10_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid10_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid10_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    ({ flush_pending_writes(conf);
			       raid10_unplug(conf->mddev->queue); }));
	spin_unlock_irq(&conf->resync_lock);
}
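
/*
 * Example (added for illustration): if three requests are pending and
 * one of them is the failed request being handled, freeze_array() waits
 * until the other two either complete (nr_pending drops to 1 == 0 + 1)
 * or are themselves queued for retry (nr_queued rises to 2, matching
 * nr_pending 3 == 2 + 1).
 */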

static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}

static int make_request(struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev->private;
	mirror_info_t *mirror;
	r10bio_t *r10_bio;
	struct bio *read_bio;
	int cpu;
	int i;
	int chunk_sects = conf->chunk_mask + 1;
	const int rw = bio_data_dir(bio);
	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
	struct bio_list bl;
	unsigned long flags;
	mdk_rdev_t *blocked_rdev;

	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
		md_barrier_request(mddev, bio);
		return 0;
	}

	/* If this request crosses a chunk boundary, we need to
	 * split it.  This will only happen for 1 PAGE (or less) requests.
	 */
	if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
		      > chunk_sects &&
		    conf->near_copies < conf->raid_disks)) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio,
			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
		if (make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	bad_map:
		printk("raid10_make_request bug: can't convert block across chunks"
		       " or bigger than %dk %llu %d\n", chunk_sects/2,
		       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

		bio_io_error(bio);
		return 0;
	}

	md_write_start(mddev, bio);

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */
	wait_barrier(conf);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

	r10_bio->master_bio = bio;
	r10_bio->sectors = bio->bi_size >> 9;

	r10_bio->mddev = mddev;
	r10_bio->sector = bio->bi_sector;
	r10_bio->state = 0;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int disk = read_balance(conf, r10_bio);
		int slot = r10_bio->read_slot;
		if (disk < 0) {
			raid_end_bio_io(r10_bio);
			return 0;
		}
		mirror = conf->mirrors + disk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r10_bio->devs[slot].bio = read_bio;

		read_bio->bi_sector = r10_bio->devs[slot].addr +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid10_end_read_request;
		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
		read_bio->bi_private = r10_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under rcu_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	raid10_find_phys(conf, r10_bio);
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0;  i < conf->copies; i++) {
		int d = r10_bio->devs[i].devnum;
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			r10_bio->devs[i].bio = bio;
		} else {
			r10_bio->devs[i].bio = NULL;
			set_bit(R10BIO_Degraded, &r10_bio->state);
		}
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Have to wait for this device to get unblocked, then retry */
		int j;
		int d;

		for (j = 0; j < i; j++)
			if (r10_bio->devs[j].bio) {
				d = r10_bio->devs[j].devnum;
				rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	atomic_set(&r10_bio->remaining, 0);

	bio_list_init(&bl);
	for (i = 0; i < conf->copies; i++) {
		struct bio *mbio;
		int d = r10_bio->devs[i].devnum;
		if (!r10_bio->devs[i].bio)
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r10_bio->devs[i].bio = mbio;

		mbio->bi_sector	= r10_bio->devs[i].addr+
			conf->mirrors[d].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		mbio->bi_end_io	= raid10_end_write_request;
		mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
		mbio->bi_private = r10_bio;

		atomic_inc(&r10_bio->remaining);
		bio_list_add(&bl, mbio);
	}

	if (unlikely(!atomic_read(&r10_bio->remaining))) {
		/* the array is dead */
		md_write_end(mddev);
		raid_end_bio_io(r10_bio);
		return 0;
	}

	bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* In case raid10d snuck in to freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync)
		md_wakeup_thread(mddev->thread);

	return 0;
}

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	int i;

	if (conf->near_copies < conf->raid_disks)
		seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
	if (conf->near_copies > 1)
		seq_printf(seq, " %d near-copies", conf->near_copies);
	if (conf->far_copies > 1) {
		if (conf->far_offset)
			seq_printf(seq, " %d offset-copies", conf->far_copies);
		else
			seq_printf(seq, " %d far-copies", conf->far_copies);
	}
	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
					conf->raid_disks - mddev->degraded);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s",
			      conf->mirrors[i].rdev &&
			      test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
	seq_printf(seq, "]");
}

static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev->private;

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disks, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && conf->raid_disks-mddev->degraded == 1)
		/*
		 * Don't fail the drive, just return an IO error.
		 * The test should really be more sophisticated than
		 * "working_disks == 1", but it isn't critical, and
		 * can wait until we do more sophisticated "is the drive
		 * really dead" tests...
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
		"raid10: Operation continuing on %d devices.\n",
		bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;
	mirror_info_t *tmp;

	printk("RAID10 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->mirrors + i;
		if (tmp->rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
				i, !test_bit(In_sync, &tmp->rdev->flags),
			        !test_bit(Faulty, &tmp->rdev->flags),
				bdevname(tmp->rdev->bdev,b));
	}
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r10buf_pool);
	conf->r10buf_pool = NULL;
}

/* check if there are enough drives for
 * every block to appear on at least one
 */
static int enough(conf_t *conf)
{
	int first = 0;

	do {
		int n = conf->copies;
		int cnt = 0;
		while (n--) {
			if (conf->mirrors[first].rdev)
				cnt++;
			first = (first+1) % conf->raid_disks;
		}
		if (cnt == 0)
			return 0;
	} while (first != 0);
	return 1;
}
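
/*
 * Example (added for illustration): with copies == 2 and raid_disks == 4
 * the windows checked are {0,1} and {2,3}; losing devices 0 and 1 leaves
 * window {0,1} empty so enough() returns 0, while losing devices 0 and 2
 * still leaves one working device in each window.
 */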

static int raid10_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;
	mirror_info_t *tmp;

	/*
	 * Find all non-in_sync disks within the RAID10 configuration
	 * and mark them in_sync
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		tmp = conf->mirrors + i;
		if (tmp->rdev
		    && !test_bit(Faulty, &tmp->rdev->flags)
		    && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}

	print_conf(conf);
	return 0;
}


static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int err = -EEXIST;
	int mirror;
	mirror_info_t *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (mddev->recovery_cp < MaxSector)
		/* only hot-add to in-sync arrays, as recovery is
		 * very different from resync
		 */
		return -EBUSY;
	if (!enough(conf))
		return -EINVAL;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	if (rdev->saved_raid_disk >= 0 &&
	    rdev->saved_raid_disk >= first &&
	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
		mirror = rdev->saved_raid_disk;
	else
		mirror = first;
	for ( ; mirror <= last ; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			/* as we don't honour merge_bvec_fn, we must
			 * never risk violating it, so limit
			 * ->max_segments to one lying with a single
			 * page, as a one page request is never in
			 * violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			p->head_position = 0;
			rdev->raid_disk = mirror;
			err = 0;
			if (rdev->saved_raid_disk != mirror)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	md_integrity_add_rdev(rdev, mddev);
	print_conf(conf);
	return err;
}

static int raid10_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		/* Only remove faulty devices if recovery
		 * is not possible.
		 */
		if (!test_bit(Faulty, &rdev->flags) &&
		    enough(conf)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		md_integrity_register(mddev);
	}
abort:

	print_conf(conf);
	return err;
}


static void end_sync_read(struct bio *bio, int error)
{
	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
	conf_t *conf = r10_bio->mddev->private;
	int i,d;

	for (i=0; i<conf->copies; i++)
		if (r10_bio->devs[i].bio == bio)
			break;
	BUG_ON(i == conf->copies);
	update_head_pos(i, r10_bio);
	d = r10_bio->devs[i].devnum;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	else {
		atomic_add(r10_bio->sectors,
			   &conf->mirrors[d].rdev->corrected_errors);
		if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
			md_error(r10_bio->mddev,
				 conf->mirrors[d].rdev);
	}

	/* for reconstruct, we always reschedule after a read.
	 * for resync, only after all reads
	 */
	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
	    atomic_dec_and_test(&r10_bio->remaining)) {
		/* we have read all the blocks,
		 * do the comparison in process context in raid10d
		 */
		reschedule_retry(r10_bio);
	}
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
	mddev_t *mddev = r10_bio->mddev;
	conf_t *conf = mddev->private;
	int i,d;

	for (i = 0; i < conf->copies; i++)
		if (r10_bio->devs[i].bio == bio)
			break;
	d = r10_bio->devs[i].devnum;

	if (!uptodate)
		md_error(mddev, conf->mirrors[d].rdev);

	update_head_pos(i, r10_bio);

	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
	while (atomic_dec_and_test(&r10_bio->remaining)) {
		if (r10_bio->master_bio == NULL) {
			/* the primary of several recovery bios */
			sector_t s = r10_bio->sectors;
			put_buf(r10_bio);
			md_done_sync(mddev, s, 1);
			break;
		} else {
			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
			put_buf(r10_bio);
			r10_bio = r10_bio2;
		}
	}
}

/*
 * Note: sync and recovery are handled very differently for raid10
 * This code is for resync.
 * For resync, we read through virtual addresses and read all blocks.
 * If there is any error, we schedule a write.  The lowest numbered
 * drive is authoritative.
 * However, requests come in for physical addresses, so we need to map.
 * For every physical address there are raid_disks/copies virtual addresses,
 * which is always at least one, but is not necessarily an integer.
 * This means that a physical address can span multiple chunks, so we may
 * have to submit multiple io requests for a single sync request.
 */
/*
 * We check if all blocks are in-sync and only write to blocks that
 * aren't in sync
 */
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
	conf_t *conf = mddev->private;
	int i, first;
	struct bio *tbio, *fbio;

	atomic_set(&r10_bio->remaining, 1);

	/* find the first device with a block */
	for (i=0; i<conf->copies; i++)
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
			break;

	if (i == conf->copies)
		goto done;

	first = i;
	fbio = r10_bio->devs[i].bio;

	/* now find blocks with errors */
	for (i=0 ; i < conf->copies ; i++) {
		int  j, d;
		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);

		tbio = r10_bio->devs[i].bio;

		if (tbio->bi_end_io != end_sync_read)
			continue;
		if (i == first)
			continue;
		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
			/* We know that the bi_io_vec layout is the same for
			 * both 'first' and 'i', so we just compare them.
			 * All vec entries are PAGE_SIZE;
			 */
			for (j = 0; j < vcnt; j++)
				if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
					   page_address(tbio->bi_io_vec[j].bv_page),
					   PAGE_SIZE))
					break;
			if (j == vcnt)
				continue;
			mddev->resync_mismatches += r10_bio->sectors;
		}
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			/* Don't fix anything. */
			continue;
		/* Ok, we need to write this bio
		 * First we need to fixup bv_offset, bv_len and
		 * bi_vecs, as the read request might have corrupted these
		 */
		tbio->bi_vcnt = vcnt;
		tbio->bi_size = r10_bio->sectors << 9;
		tbio->bi_idx = 0;
		tbio->bi_phys_segments = 0;
		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
		tbio->bi_flags |= 1 << BIO_UPTODATE;
		tbio->bi_next = NULL;
		tbio->bi_rw = WRITE;
		tbio->bi_private = r10_bio;
		tbio->bi_sector = r10_bio->devs[i].addr;

		for (j=0; j < vcnt ; j++) {
			tbio->bi_io_vec[j].bv_offset = 0;
			tbio->bi_io_vec[j].bv_len = PAGE_SIZE;

			memcpy(page_address(tbio->bi_io_vec[j].bv_page),
			       page_address(fbio->bi_io_vec[j].bv_page),
			       PAGE_SIZE);
		}
		tbio->bi_end_io = end_sync_write;

		d = r10_bio->devs[i].devnum;
		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
		atomic_inc(&r10_bio->remaining);
		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);

		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
		generic_make_request(tbio);
	}

done:
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		md_done_sync(mddev, r10_bio->sectors, 1);
		put_buf(r10_bio);
	}
}

/*
 * Now for the recovery code.
 * Recovery happens across physical sectors.
 * We recover all non-in_sync drives by finding the virtual address of
 * each, and then choose a working drive that also has that virt address.
 * There is a separate r10_bio for each non-in_sync drive.
 * Only the first two slots are in use. The first for reading,
 * The second for writing.
 *
 */

static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
	conf_t *conf = mddev->private;
	int i, d;
	struct bio *bio, *wbio;


	/* move the pages across to the second bio
	 * and submit the write request
	 */
	bio = r10_bio->devs[0].bio;
	wbio = r10_bio->devs[1].bio;
	for (i=0; i < wbio->bi_vcnt; i++) {
		struct page *p = bio->bi_io_vec[i].bv_page;
		bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
		wbio->bi_io_vec[i].bv_page = p;
	}
	d = r10_bio->devs[1].devnum;

	atomic_inc(&conf->mirrors[d].rdev->nr_pending);
	md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
	if (test_bit(R10BIO_Uptodate, &r10_bio->state))
		generic_make_request(wbio);
	else
		bio_endio(wbio, -EIO);
}


/*
 * Used by fix_read_error() to decay the per rdev read_errors.
 * We halve the read error count for every hour that has elapsed
 * since the last recorded read error.
 *
 */
static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct timespec cur_time_mon;
	unsigned long hours_since_last;
	unsigned int read_errors = atomic_read(&rdev->read_errors);

	ktime_get_ts(&cur_time_mon);

	if (rdev->last_read_error.tv_sec == 0 &&
	    rdev->last_read_error.tv_nsec == 0) {
		/* first time we've seen a read error */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (cur_time_mon.tv_sec -
			    rdev->last_read_error.tv_sec) / 3600;

	rdev->last_read_error = cur_time_mon;

	/*
	 * if hours_since_last is > the number of bits in read_errors
	 * just set read errors to 0. We do this to avoid
	 * overflowing the shift of read_errors by hours_since_last.
	 */
	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
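
/*
 * Example (added for illustration): a device with read_errors == 40
 * whose previous error was three hours ago decays to 40 >> 3 == 5
 * before the new error is counted.
 */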

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
{
	int sect = 0; /* Offset from r10_bio->sector */
	int sectors = r10_bio->sectors;
	mdk_rdev_t *rdev;
	int max_read_errors = atomic_read(&mddev->max_corr_read_errors);

	rcu_read_lock();
	{
		int d = r10_bio->devs[r10_bio->read_slot].devnum;
		char b[BDEVNAME_SIZE];
		int cur_read_error_count = 0;

		rdev = rcu_dereference(conf->mirrors[d].rdev);
		bdevname(rdev->bdev, b);

		if (test_bit(Faulty, &rdev->flags)) {
			rcu_read_unlock();
			/* drive has already been failed, just ignore any
			   more fix_read_error() attempts */
			return;
		}

		check_decay_read_errors(mddev, rdev);
		atomic_inc(&rdev->read_errors);
		cur_read_error_count = atomic_read(&rdev->read_errors);
		if (cur_read_error_count > max_read_errors) {
			rcu_read_unlock();
			printk(KERN_NOTICE
			       "raid10: %s: Raid device exceeded "
			       "read_error threshold "
			       "[cur %d:max %d]\n",
			       b, cur_read_error_count, max_read_errors);
			printk(KERN_NOTICE
			       "raid10: %s: Failing raid "
			       "device\n", b);
			md_error(mddev, conf->mirrors[d].rdev);
			return;
		}
	}
	rcu_read_unlock();

	while(sectors) {
		int s = sectors;
		int sl = r10_bio->read_slot;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
		do {
			int d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				success = sync_page_io(rdev->bdev,
						       r10_bio->devs[sl].addr +
						       sect + rdev->data_offset,
						       s<<9,
						       conf->tmppage, READ);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
				if (success)
					break;
			}
			sl++;
			if (sl == conf->copies)
				sl = 0;
		} while (!success && sl != r10_bio->read_slot);
		rcu_read_unlock();

		if (!success) {
			/* Cannot read from anywhere -- bye bye array */
			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
			md_error(mddev, conf->mirrors[dn].rdev);
			break;
		}

		start = sl;
		/* write it back and re-read */
		rcu_read_lock();
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];
			int d;
			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				atomic_add(s, &rdev->corrected_errors);
				if (sync_page_io(rdev->bdev,
						 r10_bio->devs[sl].addr +
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, WRITE)
				    == 0) {
					/* Well, this device is dead */
					printk(KERN_NOTICE
					       "raid10:%s: read correction "
					       "write failed"
					       " (%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect+
					       rdev->data_offset),
					       bdevname(rdev->bdev, b));
					printk(KERN_NOTICE "raid10:%s: failing "
					       "drive\n",
					       bdevname(rdev->bdev, b));
					md_error(mddev, rdev);
				}
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
		sl = start;
		while (sl != r10_bio->read_slot) {
			int d;
			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				char b[BDEVNAME_SIZE];
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				if (sync_page_io(rdev->bdev,
						 r10_bio->devs[sl].addr +
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage,
						 READ) == 0) {
					/* Well, this device is dead */
					printk(KERN_NOTICE
					       "raid10:%s: unable to read back "
					       "corrected sectors"
					       " (%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect+
						    rdev->data_offset),
					       bdevname(rdev->bdev, b));
					printk(KERN_NOTICE "raid10:%s: failing drive\n",
					       bdevname(rdev->bdev, b));

1635
					md_error(mddev, rdev);
1636
				} else {
1637 1638 1639 1640
					printk(KERN_INFO
					       "raid10:%s: read error corrected"
					       " (%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
1641 1642
					       (unsigned long long)(sect+
					            rdev->data_offset),
1643
					       bdevname(rdev->bdev, b));
1644
				}
1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656

				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
		rcu_read_unlock();

		sectors -= s;
		sect += s;
	}
}

static void raid10d(mddev_t *mddev)
{
	r10bio_t *r10_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	int unplug=0;
	mdk_rdev_t *rdev;

	md_check_recovery(mddev);

	for (;;) {
		char b[BDEVNAME_SIZE];

		unplug += flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, r10bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
			sync_request_write(mddev, r10_bio);
			unplug = 1;
		} else 	if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
			recovery_request_write(mddev, r10_bio);
			unplug = 1;
		} else {
			int mirror;
			/* we got a read error. Maybe the drive is bad.  Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices.  When we find one, we re-write
			 * and check that it fixes the read error.
			 * This is all done synchronously while the array is
			 * frozen.
			 */
			if (mddev->ro == 0) {
				freeze_array(conf);
				fix_read_error(conf, mddev, r10_bio);
				unfreeze_array(conf);
			}

L
1709 1710
			r10_bio->devs[r10_bio->read_slot].bio =
				mddev->ro ? IO_BLOCKED : NULL;
L
			if (mirror == -1) {
				printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r10_bio->sector);
				raid_end_bio_io(r10_bio);
				bio_put(bio);
			} else {
				const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
				bio_put(bio);
				rdev = conf->mirrors[mirror].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev,b),
					       (unsigned long long)r10_bio->sector);
				bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
				r10_bio->devs[r10_bio->read_slot].bio = bio;
				bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
					+ rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
				bio->bi_private = r10_bio;
				bio->bi_end_io = raid10_end_read_request;
				unplug = 1;
				generic_make_request(bio);
			}
		}
		cond_resched();
	}
	if (unplug)
		unplug_slaves(mddev);
}


static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r10buf_pool);
	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
	if (!conf->r10buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflicts with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences.  If only one copy is live,
 * skip it.
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device.  We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we set up these structures, we collect all the bios together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to generic_make_request.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining.  When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 * is complete.
 *
 */
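
/*
 * Illustrative sketch (hypothetical recovery of two devices A and B at
 * the same virtual address): the chain built below looks like
 *
 *	r10_bio(B) --master_bio--> r10_bio(A) --master_bio--> NULL
 *
 * and each link adds one to ->remaining of the r10_bio it points at, so
 * completion is only reported once the whole chain has finished.
 */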

static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev->private;
	r10bio_t *r10_bio;
	struct bio *biolist = NULL, *bio;
	sector_t max_sector, nr_sectors;
	int disk;
	int i;
	int max_sync;
	int sync_blocks;

	sector_t sectors_skipped = 0;
	int chunks_skipped = 0;

	if (!conf->r10buf_pool)
		if (init_resync(conf))
			return 0;

 skipped:
	max_sector = mddev->dev_sectors;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		max_sector = mddev->resync_max_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices),
		 * as we may have started syncing them but not finished.
		 * We can find the current address in
		 * mddev->curr_resync, but for recovery,
		 * we need to convert that to several
		 * virtual addresses.
		 */
		if (mddev->curr_resync < max_sector) { /* aborted */
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
			else for (i=0; i<conf->raid_disks; i++) {
				sector_t sect =
					raid10_find_virt(conf, mddev->curr_resync, i);
				bitmap_end_sync(mddev->bitmap, sect,
						&sync_blocks, 1);
			}
		} else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		*skipped = 1;
		return sectors_skipped;
	}
	if (chunks_skipped >= conf->raid_disks) {
		/* if there has been nothing to do on any drive,
		 * then there is nothing to do at all..
		 */
		*skipped = 1;
		return (max_sector - sector_nr) + sectors_skipped;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */

	/* make sure the whole request will fit in a chunk - if chunks
	 * are meaningful
	 */
	if (conf->near_copies < conf->raid_disks &&
	    max_sector > (sector_nr | conf->chunk_mask))
		max_sector = (sector_nr | conf->chunk_mask) + 1;
	/*
	 * If there is non-resync activity waiting for us then
	 * put in a delay to throttle resync.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	/* Again, very different code for resync and recovery.
	 * Both must result in an r10bio with a list of bios that
	 * have bi_end_io, bi_sector, bi_bdev set,
	 * and bi_private set to the r10bio.
	 * For recovery, we may actually create several r10bios
	 * with 2 bios in each, that correspond to the bios in the main one.
	 * In this case, the subordinate r10bios link back through a
	 * borrowed master_bio pointer, and the counter in the master
	 * includes a ref from each subordinate.
	 */
	/* First, we decide what to do and set ->bi_end_io
	 * To end_sync_read if we want to read, and
	 * end_sync_write if we will want to write.
	 */

	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
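	/* i.e. one RESYNC_BLOCK: with 4KB pages that is 16 pages << 3
	 * = 128 sectors (64KB). */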
	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* recovery... the complicated one */
		int j, k;
		r10_bio = NULL;

		for (i=0 ; i<conf->raid_disks; i++)
			if (conf->mirrors[i].rdev &&
			    !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
				int still_degraded = 0;
				/* want to reconstruct this device */
				r10bio_t *rb2 = r10_bio;
				sector_t sect = raid10_find_virt(conf, sector_nr, i);
				int must_sync;
				/* Unless we are doing a full sync, we only need
				 * to recover the block if it is set in the bitmap
				 */
				must_sync = bitmap_start_sync(mddev->bitmap, sect,
							      &sync_blocks, 1);
				if (sync_blocks < max_sync)
					max_sync = sync_blocks;
				if (!must_sync &&
				    !conf->fullsync) {
					/* yep, skip the sync_blocks here, but don't assume
					 * that there will never be anything to do here
					 */
					chunks_skipped = -1;
					continue;
				}

				r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
				raise_barrier(conf, rb2 != NULL);
				atomic_set(&r10_bio->remaining, 0);

				r10_bio->master_bio = (struct bio*)rb2;
				if (rb2)
					atomic_inc(&rb2->remaining);
				r10_bio->mddev = mddev;
				set_bit(R10BIO_IsRecover, &r10_bio->state);
				r10_bio->sector = sect;

				raid10_find_phys(conf, r10_bio);

				/* Need to check if the array will still be
				 * degraded
				 */
				for (j=0; j<conf->raid_disks; j++)
					if (conf->mirrors[j].rdev == NULL ||
					    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
						still_degraded = 1;
						break;
					}

				must_sync = bitmap_start_sync(mddev->bitmap, sect,
							      &sync_blocks, still_degraded);

				for (j=0; j<conf->copies;j++) {
					int d = r10_bio->devs[j].devnum;
					if (conf->mirrors[d].rdev &&
					    test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
						/* This is where we read from */
						bio = r10_bio->devs[0].bio;
						bio->bi_next = biolist;
						biolist = bio;
						bio->bi_private = r10_bio;
						bio->bi_end_io = end_sync_read;
						bio->bi_rw = READ;
						bio->bi_sector = r10_bio->devs[j].addr +
							conf->mirrors[d].rdev->data_offset;
						bio->bi_bdev = conf->mirrors[d].rdev->bdev;
						atomic_inc(&conf->mirrors[d].rdev->nr_pending);
						atomic_inc(&r10_bio->remaining);
						/* and we write to 'i' */

						for (k=0; k<conf->copies; k++)
							if (r10_bio->devs[k].devnum == i)
								break;
						BUG_ON(k == conf->copies);
						bio = r10_bio->devs[1].bio;
						bio->bi_next = biolist;
						biolist = bio;
						bio->bi_private = r10_bio;
						bio->bi_end_io = end_sync_write;
						bio->bi_rw = WRITE;
						bio->bi_sector = r10_bio->devs[k].addr +
							conf->mirrors[i].rdev->data_offset;
						bio->bi_bdev = conf->mirrors[i].rdev->bdev;

						r10_bio->devs[0].devnum = d;
						r10_bio->devs[1].devnum = i;

						break;
					}
				}
				if (j == conf->copies) {
					/* Cannot recover, so abort the recovery */
					put_buf(r10_bio);
					if (rb2)
						atomic_dec(&rb2->remaining);
					r10_bio = rb2;
					if (!test_and_set_bit(MD_RECOVERY_INTR,
							      &mddev->recovery))
						printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
						       mdname(mddev));
					break;
				}
			}
		if (biolist == NULL) {
			while (r10_bio) {
				r10bio_t *rb2 = r10_bio;
				r10_bio = (r10bio_t*) rb2->master_bio;
				rb2->master_bio = NULL;
				put_buf(rb2);
			}
			goto giveup;
		}
	} else {
		/* resync. Schedule a read for every block at this virt offset */
		int count = 0;

		bitmap_cond_end_sync(mddev->bitmap, sector_nr);

		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
				       &sync_blocks, mddev->degraded) &&
		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			/* We can skip this block */
			*skipped = 1;
			return sync_blocks + sectors_skipped;
		}
		if (sync_blocks < max_sync)
			max_sync = sync_blocks;
		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);

		r10_bio->mddev = mddev;
		atomic_set(&r10_bio->remaining, 0);
		raise_barrier(conf, 0);
		conf->next_resync = sector_nr;

		r10_bio->master_bio = NULL;
		r10_bio->sector = sector_nr;
		set_bit(R10BIO_IsSync, &r10_bio->state);
		raid10_find_phys(conf, r10_bio);
		r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;

		for (i=0; i<conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			bio = r10_bio->devs[i].bio;
			bio->bi_end_io = NULL;
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
				continue;
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;
			bio->bi_rw = READ;
			bio->bi_sector = r10_bio->devs[i].addr +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
			count++;
		}

		if (count < 2) {
			for (i=0; i<conf->copies; i++) {
				int d = r10_bio->devs[i].devnum;
				if (r10_bio->devs[i].bio->bi_end_io)
					rdev_dec_pending(conf->mirrors[d].rdev, mddev);
			}
			put_buf(r10_bio);
			biolist = NULL;
			goto giveup;
		}
	}

	for (bio = biolist; bio ; bio=bio->bi_next) {
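		/* Reset the state left in the pooled bios by their previous
		 * use: clear every flag bit but the pool number, mark the
		 * bios that will actually be submitted (bi_end_io set) as
		 * up to date, and zero the vector/segment/size counters. */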

		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
		if (bio->bi_end_io)
			bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
	}

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
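	/*
	 * Pages are added to all bios in lock-step below, so that every
	 * copy covers exactly the same sectors; if any bio refuses a page,
	 * the page is backed out of the bios that already took it and we
	 * submit what we have.
	 */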
	do {
		struct page *page;
		int len = PAGE_SIZE;
		disk = 0;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio= biolist ; bio ; bio=bio->bi_next) {
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0) == 0) {
				/* stop here */
				struct bio *bio2;
				bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
				for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
					/* remove last page from this bio */
					bio2->bi_vcnt--;
					bio2->bi_size -= len;
					bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
				}
				goto bio_full;
			}
			disk = i;
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r10_bio->sectors = nr_sectors;

	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			generic_make_request(bio);
		}
	}

	if (sectors_skipped)
		/* pretend they weren't skipped; it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;
 giveup:
	/* There is nowhere to write, so all non-sync
	 * drives must be failed, so try the next chunk...
	 */
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;

	sectors_skipped += (max_sector - sector_nr);
	chunks_skipped ++;
	sector_nr = max_sector;
	goto skipped;
}

static sector_t
raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	sector_t size;
	conf_t *conf = mddev->private;

	if (!raid_disks)
		raid_disks = mddev->raid_disks;
	if (!sectors)
		sectors = mddev->dev_sectors;

	size = sectors >> conf->chunk_shift;
	sector_div(size, conf->far_copies);
	size = size * raid_disks;
	sector_div(size, conf->near_copies);

	return size << conf->chunk_shift;
}
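
/*
 * Worked example with assumed numbers: 4 devices of 1024 chunks each in
 * the common "near 2, far 1" layout give 1024 / 1 * 4 / 2 = 2048 chunks
 * of usable capacity - the usual RAID10 halving.
 */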

static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, disk_idx, chunk_size;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	int nc, fc, fo;
	sector_t stride, size;

	if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
	    !is_power_of_2(mddev->chunk_sectors)) {
		printk(KERN_ERR "md/raid10: chunk size must be "
		       "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
		return -EINVAL;
	}

	nc = mddev->layout & 255;
	fc = (mddev->layout >> 8) & 255;
	fo = mddev->layout & (1<<16);
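	/* For example, the common "n2" layout is 0x102: near_copies = 2
	 * in the low byte, far_copies = 1 in the next byte, and the
	 * far_offset bit clear. */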
	if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
	    (mddev->layout >> 17)) {
		printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
		       mdname(mddev), mddev->layout);
		goto out;
	}
	/*
	 * copy the already verified devices into our private RAID10
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
			mdname(mddev));
		goto out;
	}
	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				 GFP_KERNEL);
	if (!conf->mirrors) {
		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out_free_conf;

	conf->raid_disks = mddev->raid_disks;
	conf->near_copies = nc;
	conf->far_copies = fc;
	conf->copies = nc*fc;
	conf->far_offset = fo;
	conf->chunk_mask = mddev->chunk_sectors - 1;
	conf->chunk_shift = ffz(~mddev->chunk_sectors);
	size = mddev->dev_sectors >> conf->chunk_shift;
	sector_div(size, fc);
	size = size * conf->raid_disks;
	sector_div(size, nc);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" in 'stride' */
	stride = size * conf->copies;

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	stride += conf->raid_disks - 1;
	sector_div(stride, conf->raid_disks);
	mddev->dev_sectors = stride << conf->chunk_shift;

	if (fo)
		stride = 1;
	else
		sector_div(stride, fc);
	conf->stride = stride << conf->chunk_shift;
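	/*
	 * Worked example with assumed numbers: 4 devices of 1024 chunks
	 * each, layout "n2 f2": the array holds 1024/2*4/2 = 1024 chunks,
	 * each stored in nc*fc = 4 places, so every device contributes
	 * 1024*4/4 = 1024 used chunks and each of its 2 far sections
	 * spans a stride of 1024/2 = 512 chunks.
	 */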

	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
						r10bio_pool_free, conf);
	if (!conf->r10bio_pool) {
		printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}

	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	mddev->queue->queue_lock = &conf->device_lock;

	chunk_size = mddev->chunk_sectors << 9;
	blk_queue_io_min(mddev->queue, chunk_size);
	if (conf->raid_disks % conf->near_copies)
		blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
	else
		blk_queue_io_opt(mddev->queue, chunk_size *
				 (conf->raid_disks / conf->near_copies));
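	/* e.g. 4 devices, n2, 64KB chunks: io_min is one 64KB chunk and
	 * io_opt is 64KB * (4/2) = 128KB, one stripe's worth of data. */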

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit max_segments to 1 lying
		 * within a single page.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		disk->head_position = 0;
	}
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	/* need to check that every block has at least one working mirror */
	if (!enough(conf)) {
		printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
			if (disk->rdev)
				conf->fullsync = 1;
		}
	}


	mddev->thread = md_register_thread(raid10d, mddev, NULL);
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid10: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "raid10: %s is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
		"raid10: raid set %s active with %d out of %d devices\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded,
		mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, raid10_size(mddev, 0, 0));
	mddev->resync_max_sectors = raid10_size(mddev, 0, 0);

	mddev->queue->unplug_fn = raid10_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/* Calculate the max read-ahead size.
	 * We need to read ahead by at least twice a whole stripe.
	 */
	{
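		/* e.g. 4 devices, 64KB chunks, 4KB pages, n2: stripe =
		 * 4 * 16 / 2 = 32 pages, so read-ahead is raised to at
		 * least 64 pages (256KB). */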
		int stripe = conf->raid_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	if (conf->near_copies < mddev->raid_disks)
		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
	md_integrity_register(mddev);
	return 0;

out_free_conf:
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev->private;

	raise_barrier(conf, 0);
	lower_barrier(conf);

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static void raid10_quiesce(mddev_t *mddev, int state)
{
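	/* Called by the md core around array suspension: state 1 raises
	 * the barrier to drain and block normal I/O, state 0 lowers it
	 * again. */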
	conf_t *conf = mddev->private;

	switch(state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

static struct mdk_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk= raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");