/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "dm-bio-list.h"
#include <linux/raid/raid1.h>
#include <linux/raid/bitmap.h>

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define	NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
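
/*
 * For reference (assuming the common PAGE_SIZE of 4096): the 64KiB
 * RESYNC_BLOCK_SIZE yields RESYNC_SECTORS == 128 and RESYNC_PAGES == 16,
 * and the 2MiB RESYNC_WINDOW sizes the resync buffer mempool at
 * RESYNC_WINDOW/RESYNC_BLOCK_SIZE == 32 buffers (see init_resync() below).
 */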

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (i=0; i < RESYNC_PAGES ; i++)
		for (j=0 ; j < pi->raid_disks; j++)
			safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = mddev_to_conf(r1_bio->mddev);

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = mddev_to_conf(r1_bio->mddev);
	struct bio *to_put = NULL;


	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}

	if (to_put)
		bio_put(to_put);
}


/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
				r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
		     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}
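
/*
 * Worked example (added commentary): with two mirrors, a read starting
 * exactly at conf->next_seq_sect is treated as sequential and stays on
 * conf->last_used; any other read prefers a completely idle mirror
 * (nr_pending == 0), and failing that takes the mirror whose
 * head_position is closest to the target sector.
 */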

static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_write_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}


/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32
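
/*
 * An illustrative (not normative) interleaving of the primitives above:
 *
 *   resync thread                     regular IO
 *   raise_barrier()   barrier = 1
 *                                     wait_barrier()  sleeps (barrier != 0)
 *   lower_barrier()   barrier = 0, wakeup
 *                                     nr_pending++, IO proceeds
 *                                     allow_barrier() nr_pending--, wakeup
 *
 * raise_barrier() first waits for nr_waiting == 0, so a queued regular
 * request always gets a turn before the next barrier goes up, and at most
 * RESYNC_DEPTH barriers (i.e. resync requests) are outstanding at once.
 */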

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
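
/*
 * Added note: wait_barrier() is taken in make_request() for every regular
 * r1bio and dropped again in free_r1bio() via allow_barrier(), so
 * nr_pending tracks each request from submission to final completion.
 */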

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until barrier+nr_pending match nr_queued+2.
	 * The "+2" accounts for the barrier we just raised plus the one
	 * in-flight request held by our caller (raid1d), which is still
	 * counted in nr_pending but has already been removed from
	 * nr_queued; every other pending request must be queued for retry.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->barrier+conf->nr_pending == conf->nr_queued+2,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
					GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
			kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}
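
/*
 * Added note: the duplicated pages above let a write-behind request be
 * acknowledged once the non-WriteMostly mirrors have the data; the copies
 * keep the data stable for the lagging WriteMostly devices and are
 * released in raid1_end_write_request().
 */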

static int make_request(struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev_to_conf(mddev);
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	mdk_rdev_t *rdev;
	struct bitmap *bitmap = mddev->bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	const int do_sync = bio_sync(bio);
	int do_barriers;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under rcu_read_lock and
	 * inc refcount on their rdev.  Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
	rcu_read_lock();
	for (i = 0;  i < disks; i++) {
		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);
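
	/*
	 * Added note: write-behind only engages when the array has a bitmap
	 * with a non-zero max_write_behind (typically configured via
	 * "mdadm --write-behind=N") and fewer than that many behind writes
	 * are already in flight; otherwise the write stays fully synchronous
	 * on all mirrors.
	 */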

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_barrier(bio);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io	= raid1_end_write_request;
		mbio->bi_rw = WRITE | do_barriers | do_sync;
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged.  This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	if (do_sync)
		md_wakeup_thread(mddev->thread);
#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}
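
/*
 * Added note: make_request() never issues the mirrored writes itself; they
 * are parked on conf->pending_bio_list and submitted by raid1d() only
 * after bitmap_unplug() has pushed out the corresponding bitmap updates,
 * so the on-disk bitmap always covers any write still in flight.
 */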

static void status(struct seq_file *seq, mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		seq_printf(seq, "%s",
			   rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_printf(seq, "]");
}


static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	conf_t *conf = mddev_to_conf(mddev);

	/*
	 * If it is not operational, then we have already marked it as dead
	 * else if it is the last working disks, ignore the error, let the
	 * next level up know.
	 * else mark the drive as failed
	 */
	if (test_bit(In_sync, &rdev->flags)
	    && (conf->raid_disks - mddev->degraded) == 1)
		/*
		 * Don't fail the drive, act as though we were just a
		 * normal single drive
		 */
		return;
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		set_bit(Faulty, &rdev->flags);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * if recovery is running, make sure it aborts.
		 */
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
	} else
		set_bit(Faulty, &rdev->flags);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
		"	Operation continuing on %d devices\n",
		bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
}

static void print_conf(conf_t *conf)
{
	int i;

	printk("RAID1 conf printout:\n");
	if (!conf) {
		printk("(!conf)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		conf->raid_disks);

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev)
			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
			       i, !test_bit(In_sync, &rdev->flags),
			       !test_bit(Faulty, &rdev->flags),
			       bdevname(rdev->bdev,b));
	}
	rcu_read_unlock();
}

static void close_sync(conf_t *conf)
{
	wait_barrier(conf);
	allow_barrier(conf);

	mempool_destroy(conf->r1buf_pool);
	conf->r1buf_pool = NULL;
}

static int raid1_spare_active(mddev_t *mddev)
{
	int i;
	conf_t *conf = mddev->private;

	/*
	 * Find all failed disks within the RAID1 configuration 
	 * and mark them readable.
	 * Called under mddev lock, so rcu protection not needed.
	 */
	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
		if (rdev
		    && !test_bit(Faulty, &rdev->flags)
		    && !test_and_set_bit(In_sync, &rdev->flags)) {
			unsigned long flags;
			spin_lock_irqsave(&conf->device_lock, flags);
			mddev->degraded--;
			spin_unlock_irqrestore(&conf->device_lock, flags);
		}
	}

	print_conf(conf);
	return 0;
}


static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	conf_t *conf = mddev->private;
	int found = 0;
	int mirror = 0;
	mirror_info_t *p;

	for (mirror=0; mirror < mddev->raid_disks; mirror++)
		if ( !(p=conf->mirrors+mirror)->rdev) {

			blk_queue_stack_limits(mddev->queue,
					       rdev->bdev->bd_disk->queue);
			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_sector to one PAGE, as
			 * a one page request is never in violation.
			 */
			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

			p->head_position = 0;
			rdev->raid_disk = mirror;
			found = 1;
			/* As all devices are equivalent, we don't need a full recovery
			 * if this was recently any drive of the array
			 */
			if (rdev->saved_raid_disk < 0)
				conf->fullsync = 1;
			rcu_assign_pointer(p->rdev, rdev);
			break;
		}

	print_conf(conf);
	return found;
}

static int raid1_remove_disk(mddev_t *mddev, int number)
{
	conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	mirror_info_t *p = conf->mirrors+ number;

	print_conf(conf);
	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
		}
	}
abort:

	print_conf(conf);
	return err;
}


static void end_sync_read(struct bio *bio, int error)
{
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	int i;

	for (i=r1_bio->mddev->raid_disks; i--; )
		if (r1_bio->bios[i] == bio)
			break;
	BUG_ON(i < 0);
	update_head_pos(i, r1_bio);
	/*
	 * we have read a block, now it needs to be re-written,
	 * or re-read if the read failed.
	 * We don't do much here, just schedule handling by raid1d
	 */
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		set_bit(R1BIO_Uptodate, &r1_bio->state);

	if (atomic_dec_and_test(&r1_bio->remaining))
		reschedule_retry(r1_bio);
}

static void end_sync_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int mirror=0;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] == bio) {
			mirror = i;
			break;
		}
	if (!uptodate) {
		int sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits don't get cleared. */
		do {
			bitmap_end_sync(mddev->bitmap, s,
					&sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		md_error(mddev, conf->mirrors[mirror].rdev);
	}

	update_head_pos(mirror, r1_bio);

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		md_done_sync(mddev, r1_bio->sectors, uptodate);
		put_buf(r1_bio);
	}
}

static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{
	conf_t *conf = mddev_to_conf(mddev);
	int i;
	int disks = conf->raid_disks;
	struct bio *bio, *wbio;

	bio = r1_bio->bios[r1_bio->read_disk];


	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We have read all readable devices.  If we haven't
		 * got the block, then there is no hope left.
		 * If we have, then we want to do a comparison
		 * and skip the write if everything is the same.
		 * If any blocks failed to read, then we need to
		 * attempt an over-write
		 */
		int primary;
		if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			for (i=0; i<mddev->raid_disks; i++)
				if (r1_bio->bios[i]->bi_end_io == end_sync_read)
					md_error(mddev, conf->mirrors[i].rdev);

			md_done_sync(mddev, r1_bio->sectors, 1);
			put_buf(r1_bio);
			return;
		}
		for (primary=0; primary<mddev->raid_disks; primary++)
			if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
			    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
				r1_bio->bios[primary]->bi_end_io = NULL;
				rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
				break;
			}
		r1_bio->read_disk = primary;
		for (i=0; i<mddev->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
				int j;
				int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
				struct bio *pbio = r1_bio->bios[primary];
				struct bio *sbio = r1_bio->bios[i];

				if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
					for (j = vcnt; j-- ; ) {
						struct page *p, *s;
						p = pbio->bi_io_vec[j].bv_page;
						s = sbio->bi_io_vec[j].bv_page;
						if (memcmp(page_address(p),
							   page_address(s),
							   PAGE_SIZE))
							break;
					}
				} else
					j = 0;
				if (j >= 0)
					mddev->resync_mismatches += r1_bio->sectors;
				if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
					      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
					sbio->bi_end_io = NULL;
					rdev_dec_pending(conf->mirrors[i].rdev, mddev);
				} else {
					/* fixup the bio for reuse */
					sbio->bi_vcnt = vcnt;
					sbio->bi_size = r1_bio->sectors << 9;
					sbio->bi_idx = 0;
					sbio->bi_phys_segments = 0;
					sbio->bi_hw_segments = 0;
					sbio->bi_hw_front_size = 0;
					sbio->bi_hw_back_size = 0;
					sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
					sbio->bi_flags |= 1 << BIO_UPTODATE;
					sbio->bi_next = NULL;
					sbio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
					for (j = 0; j < vcnt ; j++)
						memcpy(page_address(sbio->bi_io_vec[j].bv_page),
						       page_address(pbio->bi_io_vec[j].bv_page),
						       PAGE_SIZE);

				}
			}
	}
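
	/*
	 * Added note: a mismatch found above is counted in
	 * mddev->resync_mismatches either way; a plain "check" pass then
	 * drops the readable-but-different copies, while "repair" re-queues
	 * them so the writes scheduled below overwrite them with the
	 * primary's data.
	 */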
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
		/* ouch - failed to read all of that.
		 * Try some synchronous reads of other devices to get
		 * good data, much like with normal read errors.  Only
		 * read into the pages we already have so we don't
		 * need to re-issue the read request.
		 * We don't need to freeze the array, because being in an
		 * active sync request, there is no normal IO, and
		 * no overlapping syncs.
		 */
		sector_t sect = r1_bio->sector;
		int sectors = r1_bio->sectors;
		int idx = 0;

		while(sectors) {
			int s = sectors;
			int d = r1_bio->read_disk;
			int success = 0;
			mdk_rdev_t *rdev;

			if (s > (PAGE_SIZE>>9))
				s = PAGE_SIZE >> 9;
			do {
				if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
					/* No rcu protection needed here devices
					 * can only be removed when no resync is
					 * active, and resync is currently active
					 */
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ)) {
						success = 1;
						break;
					}
				}
				d++;
				if (d == conf->raid_disks)
					d = 0;
			} while (!success && d != r1_bio->read_disk);

			if (success) {
				int start = d;
				/* write it back and re-read */
				set_bit(R1BIO_Uptodate, &r1_bio->state);
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					atomic_add(s, &rdev->corrected_errors);
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 WRITE) == 0)
						md_error(mddev, rdev);
				}
				d = start;
				while (d != r1_bio->read_disk) {
					if (d == 0)
						d = conf->raid_disks;
					d--;
					if (r1_bio->bios[d]->bi_end_io != end_sync_read)
						continue;
					rdev = conf->mirrors[d].rdev;
					if (sync_page_io(rdev->bdev,
							 sect + rdev->data_offset,
							 s<<9,
							 bio->bi_io_vec[idx].bv_page,
							 READ) == 0)
						md_error(mddev, rdev);
				}
			} else {
				char b[BDEVNAME_SIZE];
				/* Cannot read from anywhere, array is toast */
				md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
				       " for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				md_done_sync(mddev, r1_bio->sectors, 0);
				put_buf(r1_bio);
				return;
			}
			sectors -= s;
			sect += s;
			idx ++;
		}
	}

	/*
	 * schedule writes
	 */
	atomic_set(&r1_bio->remaining, 1);
	for (i = 0; i < disks ; i++) {
		wbio = r1_bio->bios[i];
		if (wbio->bi_end_io == NULL ||
		    (wbio->bi_end_io == end_sync_read &&
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;

		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
		atomic_inc(&r1_bio->remaining);
		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);

		generic_make_request(wbio);
	}

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* if we're here, all write(s) have completed, so clean up */
		md_done_sync(mddev, r1_bio->sectors, 1);
		put_buf(r1_bio);
	}
}

/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working mirrors.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */

static void fix_read_error(conf_t *conf, int read_disk,
			   sector_t sect, int sectors)
{
	mddev_t *mddev = conf->mddev;
	while(sectors) {
		int s = sectors;
		int d = read_disk;
		int success = 0;
		int start;
		mdk_rdev_t *rdev;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		do {
			/* Note: no rcu protection needed here
			 * as this is synchronous in the raid1d thread
			 * which is the thread that might remove
			 * a device.  If raid1d ever becomes multi-threaded....
			 */
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags) &&
			    sync_page_io(rdev->bdev,
					 sect + rdev->data_offset,
					 s<<9,
					 conf->tmppage, READ))
				success = 1;
			else {
				d++;
				if (d == conf->raid_disks)
					d = 0;
			}
		} while (!success && d != read_disk);

		if (!success) {
			/* Cannot read from anywhere -- bye bye array */
			md_error(mddev, conf->mirrors[read_disk].rdev);
			break;
		}
		/* write it back and re-read */
		start = d;
		while (d != read_disk) {
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev->bdev,
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, WRITE)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
			}
		}
		d = start;
		while (d != read_disk) {
			char b[BDEVNAME_SIZE];
			if (d==0)
				d = conf->raid_disks;
			d--;
			rdev = conf->mirrors[d].rdev;
			if (rdev &&
			    test_bit(In_sync, &rdev->flags)) {
				if (sync_page_io(rdev->bdev,
						 sect + rdev->data_offset,
						 s<<9, conf->tmppage, READ)
				    == 0)
					/* Well, this device is dead */
					md_error(mddev, rdev);
				else {
					atomic_add(s, &rdev->corrected_errors);
					printk(KERN_INFO
					       "raid1:%s: read error corrected "
					       "(%d sectors at %llu on %s)\n",
					       mdname(mddev), s,
					       (unsigned long long)(sect +
					           rdev->data_offset),
					       bdevname(rdev->bdev, b));
				}
			}
		}
		sectors -= s;
		sect += s;
	}
}
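
/*
 * Added note: the ordering in fix_read_error() is deliberate -- every other
 * In_sync mirror is first overwritten with the known-good data and then
 * read back, and md_error() is only raised against devices that fail the
 * write or the subsequent read-back.
 */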

static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	struct bio *bio;
	unsigned long flags;
	conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;
	int unplug=0;
	mdk_rdev_t *rdev;

	md_check_recovery(mddev);
	
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);

		if (conf->pending_bio_list.head) {
			bio = bio_list_get(&conf->pending_bio_list);
			blk_remove_plug(mddev->queue);
			spin_unlock_irqrestore(&conf->device_lock, flags);
			/* flush any pending bitmap writes to disk before proceeding w/ I/O */
			bitmap_unplug(mddev->bitmap);

			while (bio) { /* submit pending writes */
				struct bio *next = bio->bi_next;
				bio->bi_next = NULL;
				generic_make_request(bio);
				bio = next;
			}
			unplug = 1;

			continue;
		}

		if (list_empty(head))
			break;
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev_to_conf(mddev);
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			sync_request_write(mddev, r1_bio);
			unplug = 1;
		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
			/* some requests in the r1bio were BIO_RW_BARRIER
			 * requests which failed with -EOPNOTSUPP.  Hohumm..
			 * Better resubmit without the barrier.
			 * We know which devices to resubmit for, because
			 * all others have had their bios[] entry cleared.
			 * We already have a nr_pending reference on these rdevs.
			 */
			int i;
			const int do_sync = bio_sync(r1_bio->master_bio);
			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
			clear_bit(R1BIO_Barrier, &r1_bio->state);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i])
					atomic_inc(&r1_bio->remaining);
			for (i=0; i < conf->raid_disks; i++)
				if (r1_bio->bios[i]) {
					struct bio_vec *bvec;
					int j;

					bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
					/* copy pages from the failed bio, as
					 * this might be a write-behind device */
					__bio_for_each_segment(bvec, bio, j, 0)
						bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
					bio_put(r1_bio->bios[i]);
					bio->bi_sector = r1_bio->sector +
						conf->mirrors[i].rdev->data_offset;
					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
					bio->bi_end_io = raid1_end_write_request;
					bio->bi_rw = WRITE | do_sync;
					bio->bi_private = r1_bio;
					r1_bio->bios[i] = bio;
					generic_make_request(bio);
				}
		} else {
			int disk;

			/* we got a read error. Maybe the drive is bad.  Maybe just
			 * the block and we can fix it.
			 * We freeze all other IO, and try reading the block from
			 * other devices.  When we find one, we re-write
			 * it and check whether that fixes the read error.
			 * This is all done synchronously while the array is
			 * frozen
			 */
			if (mddev->ro == 0) {
				freeze_array(conf);
				fix_read_error(conf, r1_bio->read_disk,
					       r1_bio->sector,
					       r1_bio->sectors);
				unfreeze_array(conf);
			}

			bio = r1_bio->bios[r1_bio->read_disk];
			if ((disk=read_balance(conf, r1_bio)) == -1) {
				printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
				       " read error for block %llu\n",
				       bdevname(bio->bi_bdev,b),
				       (unsigned long long)r1_bio->sector);
				raid_end_bio_io(r1_bio);
			} else {
				const int do_sync = bio_sync(r1_bio->master_bio);
				r1_bio->bios[r1_bio->read_disk] =
					mddev->ro ? IO_BLOCKED : NULL;
				r1_bio->read_disk = disk;
				bio_put(bio);
				bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
				r1_bio->bios[r1_bio->read_disk] = bio;
				rdev = conf->mirrors[disk].rdev;
				if (printk_ratelimit())
					printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
					       " another mirror\n",
					       bdevname(rdev->bdev,b),
					       (unsigned long long)r1_bio->sector);
				bio->bi_sector = r1_bio->sector + rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				bio->bi_end_io = raid1_end_read_request;
				bio->bi_rw = READ | do_sync;
				bio->bi_private = r1_bio;
				unplug = 1;
				generic_make_request(bio);
			}
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
	if (unplug)
		unplug_slaves(mddev);
}


static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev_to_conf(mddev);
	r1bio_t *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	int sync_blocks;
	int still_degraded = 0;

	if (!conf->r1buf_pool)
	{
/*
		printk("sync start - bitmap %p\n", mddev->bitmap);
*/
		if (init_resync(conf))
			return 0;
	}

	max_sector = mddev->size << 1;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* before building a request, check if we can skip these blocks..
	 * This call to bitmap_start_sync doesn't actually record anything
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	raise_barrier(conf);

	conf->next_resync = sector_nr;

	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i=0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_rw = READ;
L
Linus Torvalds 已提交
1716 1717 1718 1719 1720 1721 1722 1723
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_hw_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			still_degraded = 1;
			continue;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			bio->bi_rw = READ;
			bio->bi_end_io = end_sync_read;
			if (test_bit(WriteMostly, &rdev->flags)) {
				if (wonly < 0)
					wonly = i;
			} else {
				if (disk < 0)
					disk = i;
			}
			read_targets++;
		}
		atomic_inc(&rdev->nr_pending);
		bio->bi_sector = sector_nr + rdev->data_offset;
		bio->bi_bdev = rdev->bdev;
		bio->bi_private = r1_bio;
	}
	rcu_read_unlock();
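	/* No normal read target was found: fall back to a
	 * write-mostly device if one was readable at all.
	 */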
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	nr_sectors = 0;
	sync_blocks = 0;
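	/* Build the request: add pages to every active bio in lockstep,
	 * a page at a time, until we hit the end of the device, a bitmap
	 * chunk that can be skipped, or a full bio (RESYNC_PAGES).
	 */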
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if (len > (sync_blocks<<9))
				len = sync_blocks<<9;
		}

		for (i=0 ; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io==NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1<< BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i=0; i<conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);

	}
	return nr_sectors;
}

static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i, j, disk_idx;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->level != 1) {
		printk("raid1: %s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	if (mddev->reshape_position != MaxSector) {
		printk("raid1: %s: reshape_position set but not supported\n",
		       mdname(mddev));
		goto out;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run(),
	 * should be freed in stop()]
	 */
	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf)
		goto out_no_mem;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				 GFP_KERNEL);
	if (!conf->mirrors)
		goto out_no_mem;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out_no_mem;

	conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto out_no_mem;
	conf->poolinfo->mddev = mddev;
	conf->poolinfo->raid_disks = mddev->raid_disks;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto out_no_mem;

	ITERATE_RDEV(mddev, rdev, tmp) {
		disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;

		blk_queue_stack_limits(mddev->queue,
				       rdev->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		disk->head_position = 0;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);
	bio_list_init(&conf->flushing_bio_list);


	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			mddev->degraded++;
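			/* A device that is present but not In_sync needs a
			 * full rebuild, so don't let the bitmap skip blocks.
			 */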
			if (disk->rdev)
				conf->fullsync = 1;
		}
	}
	if (mddev->degraded == conf->raid_disks) {
		printk(KERN_ERR "raid1: no operational mirrors for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}
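	/* With only one working mirror left there is nothing to resync
	 * against, so the array is as consistent as it can get.
	 */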
	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	/*
	 * find the first working one and use it as a starting point
	 * for read balancing.
	 */
	for (j = 0; j < conf->raid_disks &&
		     (!conf->mirrors[j].rdev ||
		      !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
		/* nothing */;
	conf->last_used = j;


	mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
	if (!mddev->thread) {
		printk(KERN_ERR
		       "raid1: couldn't allocate thread for %s\n",
		       mdname(mddev));
		goto out_free_conf;
	}

	printk(KERN_INFO 
		"raid1: raid set %s active with %d out of %d mirrors\n",
		mdname(mddev), mddev->raid_disks - mddev->degraded, 
		mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->array_size = mddev->size;

	mddev->queue->unplug_fn = raid1_unplug;
	mddev->queue->backing_dev_info.congested_fn = raid1_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	return 0;

out_no_mem:
	printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
	       mdname(mddev));

out_free_conf:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
		mddev->private = NULL;
	}
out:
	return -EIO;
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev_to_conf(mddev);
	struct bitmap *bitmap = mddev->bitmap;
	int behind_wait = 0;

	/* wait for behind writes to complete */
	while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		behind_wait++;
		printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ); /* wait a second */
		/* need to kick something here to make sure I/O goes? */
	}

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static int raid1_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	mddev->array_size = sectors>>1;
	set_capacity(mddev->gendisk, mddev->array_size << 1);
	mddev->changed = 1;
	if (mddev->array_size > mddev->size && mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->size << 1;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->size = mddev->array_size;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(mddev_t *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	mirror_info_t *newmirrors;
	conf_t *conf = mddev_to_conf(mddev);
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_size != mddev->new_chunk ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk = mddev->chunk_size;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	md_allow_write(mddev);

	raid_disks = mddev->raid_disks + mddev->delta_disks;

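	/* When shrinking, refuse if more devices are still in use than
	 * the new array will have slots for.
	 */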
	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;
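	/* Pack the surviving devices down to the low raid_disk numbers,
	 * moving each rdN sysfs link to match its new slot.
	 */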
	for (d = d2 = 0; d < conf->raid_disks; d++) {
		mdk_rdev_t *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_remove_link(&mddev->kobj, nm);
			rdev->raid_disk = d2;
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_remove_link(&mddev->kobj, nm);
			if (sysfs_create_link(&mddev->kobj,
					      &rdev->kobj, nm))
				printk(KERN_WARNING
				       "md/raid1: cannot register "
				       "%s for %s\n",
				       nm, mdname(mddev));
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

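/* Freeze (state 1) or thaw (state 0) all normal I/O, reusing the
 * barrier that excludes normal I/O during resync.
 */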
static void raid1_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev_to_conf(mddev);

	switch(state) {
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

static struct mdk_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");