md.c 221.0 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

22 23 24
   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

L
Linus Torvalds 已提交
25 26 27 28 29 30 31 32 33 34
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

35
#include <linux/kthread.h>
36
#include <linux/blkdev.h>
L
Linus Torvalds 已提交
37
#include <linux/sysctl.h>
38
#include <linux/seq_file.h>
A
Al Viro 已提交
39
#include <linux/fs.h>
40
#include <linux/poll.h>
41
#include <linux/ctype.h>
42
#include <linux/string.h>
43 44 45
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
46
#include <linux/module.h>
47
#include <linux/reboot.h>
48
#include <linux/file.h>
49
#include <linux/compat.h>
50
#include <linux/delay.h>
51 52
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
53
#include <linux/slab.h>
54
#include "md.h"
55
#include "bitmap.h"
L
Linus Torvalds 已提交
56 57

#ifndef MODULE
58
static void autostart_arrays(int part);
L
Linus Torvalds 已提交
59 60
#endif

61 62 63 64 65
/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
66
static LIST_HEAD(pers_list);
L
Linus Torvalds 已提交
67 68
static DEFINE_SPINLOCK(pers_lock);

69 70
static void md_print_devices(void);

71
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
T
Tejun Heo 已提交
72 73
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
74

75 76
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

77 78 79 80 81 82
/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
L
Linus Torvalds 已提交
83 84 85 86
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
87
 * the RAID driver will use the maximum available bandwidth if the IO
L
Linus Torvalds 已提交
88 89 90 91 92
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
93
 * or /sys/block/mdX/md/sync_speed_{min,max}
L
Linus Torvalds 已提交
94 95 96 97
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
98
static inline int speed_min(struct mddev *mddev)
99 100 101 102 103
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

104
static inline int speed_max(struct mddev *mddev)
105 106 107 108
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
L
Linus Torvalds 已提交
109 110 111 112 113 114 115 116

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
117
		.mode		= S_IRUGO|S_IWUSR,
118
		.proc_handler	= proc_dointvec,
L
Linus Torvalds 已提交
119 120 121 122 123
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
124
		.mode		= S_IRUGO|S_IWUSR,
125
		.proc_handler	= proc_dointvec,
L
Linus Torvalds 已提交
126
	},
127
	{ }
L
Linus Torvalds 已提交
128 129 130 131 132 133
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
134
		.mode		= S_IRUGO|S_IXUGO,
L
Linus Torvalds 已提交
135 136
		.child		= raid_table,
	},
137
	{ }
L
Linus Torvalds 已提交
138 139 140 141 142 143 144 145 146
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
147
	{  }
L
Linus Torvalds 已提交
148 149
};

150
static const struct block_device_operations md_fops;
L
Linus Torvalds 已提交
151

152 153
static int start_readonly;

154 155 156 157 158
/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
159
			    struct mddev *mddev)
160 161 162 163 164 165
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

166
	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
167 168 169 170 171 172 173
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
174
			    struct mddev *mddev)
175 176 177 178
{
	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

179
	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
180 181 182
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231
void md_trim_bio(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
	 */
	int i;
	struct bio_vec *bvec;
	int sofar = 0;

	size <<= 9;
	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;
	bio->bi_size = size;
	offset <<= 9;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	/* avoid any complications with bi_idx being non-zero*/
	if (bio->bi_idx) {
		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
		bio->bi_vcnt -= bio->bi_idx;
		bio->bi_idx = 0;
	}
	/* Make sure vcnt and last bv are not too big */
	bio_for_each_segment(bvec, bio, i) {
		if (sofar + bvec->bv_len > size)
			bvec->bv_len = size - sofar;
		if (bvec->bv_len == 0) {
			bio->bi_vcnt = i;
			break;
		}
		sofar += bvec->bv_len;
	}
}
EXPORT_SYMBOL_GPL(md_trim_bio);

232 233 234 235 236 237 238 239 240 241
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
242
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
243
static atomic_t md_event_count;
244
void md_new_event(struct mddev *mddev)
245 246 247 248
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
249
EXPORT_SYMBOL_GPL(md_new_event);
250

251 252 253
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
254
static void md_new_event_inintr(struct mddev *mddev)
255 256 257 258 259
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

L
Linus Torvalds 已提交
260 261 262 263 264 265 266 267 268 269 270 271 272 273 274
/*
 * Enables to iterate over all existing md arrays
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while own
 * a reference to the current mddev and must mddev_put it.
 */
275
#define for_each_mddev(_mddev,_tmp)					\
L
Linus Torvalds 已提交
276 277
									\
	for (({ spin_lock(&all_mddevs_lock); 				\
278 279 280 281
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
L
Linus Torvalds 已提交
282
		spin_unlock(&all_mddevs_lock);				\
283 284 285
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
L
Linus Torvalds 已提交
286
	     ({ spin_lock(&all_mddevs_lock);				\
287
		_tmp = _tmp->next;})					\
L
Linus Torvalds 已提交
288 289 290
		)


291 292 293 294 295 296 297
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
298
static void md_make_request(struct request_queue *q, struct bio *bio)
L
Linus Torvalds 已提交
299
{
300
	const int rw = bio_data_dir(bio);
301
	struct mddev *mddev = q->queuedata;
302
	int cpu;
303
	unsigned int sectors;
304

305 306
	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
307
		bio_io_error(bio);
308
		return;
309
	}
310
	smp_rmb(); /* Ensure implications of  'active' are visible */
311
	rcu_read_lock();
T
Tejun Heo 已提交
312
	if (mddev->suspended) {
313 314 315 316
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
T
Tejun Heo 已提交
317
			if (!mddev->suspended)
318 319 320 321 322 323 324 325 326
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();
327

328 329 330 331 332
	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
333
	mddev->pers->make_request(mddev, bio);
334 335 336

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
337
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
338 339
	part_stat_unlock();

340 341 342 343
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}

344 345 346 347 348 349
/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
350
void mddev_suspend(struct mddev *mddev)
351 352 353 354 355 356
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
357 358

	del_timer_sync(&mddev->safemode_timer);
359
}
360
EXPORT_SYMBOL_GPL(mddev_suspend);
361

362
void mddev_resume(struct mddev *mddev)
363 364 365 366
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
367

368
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
369 370
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
L
Linus Torvalds 已提交
371
}
372
EXPORT_SYMBOL_GPL(mddev_resume);
L
Linus Torvalds 已提交
373

374
int mddev_congested(struct mddev *mddev, int bits)
375 376 377 378 379
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

380
/*
T
Tejun Heo 已提交
381
 * Generic flush handling for md
382 383
 */

T
Tejun Heo 已提交
384
static void md_end_flush(struct bio *bio, int err)
385
{
386
	struct md_rdev *rdev = bio->bi_private;
387
	struct mddev *mddev = rdev->mddev;
388 389 390 391

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
T
Tejun Heo 已提交
392
		/* The pre-request flush has finished */
T
Tejun Heo 已提交
393
		queue_work(md_wq, &mddev->flush_work);
394 395 396 397
	}
	bio_put(bio);
}

N
NeilBrown 已提交
398 399
static void md_submit_flush_data(struct work_struct *ws);

400
static void submit_flushes(struct work_struct *ws)
401
{
402
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
403
	struct md_rdev *rdev;
404

N
NeilBrown 已提交
405 406
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
407
	rcu_read_lock();
N
NeilBrown 已提交
408
	rdev_for_each_rcu(rdev, mddev)
409 410 411 412 413 414 415 416 417 418
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
419
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
T
Tejun Heo 已提交
420
			bi->bi_end_io = md_end_flush;
421 422 423
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
T
Tejun Heo 已提交
424
			submit_bio(WRITE_FLUSH, bi);
425 426 427 428
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
N
NeilBrown 已提交
429 430
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
431 432
}

T
Tejun Heo 已提交
433
static void md_submit_flush_data(struct work_struct *ws)
434
{
435
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
T
Tejun Heo 已提交
436
	struct bio *bio = mddev->flush_bio;
437

T
Tejun Heo 已提交
438
	if (bio->bi_size == 0)
439 440 441
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
T
Tejun Heo 已提交
442
		bio->bi_rw &= ~REQ_FLUSH;
443
		mddev->pers->make_request(mddev, bio);
444
	}
445 446 447

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
448 449
}

450
void md_flush_request(struct mddev *mddev, struct bio *bio)
451 452 453
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
T
Tejun Heo 已提交
454
			    !mddev->flush_bio,
455
			    mddev->write_lock, /*nothing*/);
T
Tejun Heo 已提交
456
	mddev->flush_bio = bio;
457 458
	spin_unlock_irq(&mddev->write_lock);

459 460
	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
461
}
T
Tejun Heo 已提交
462
EXPORT_SYMBOL(md_flush_request);
463

464
void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
465
{
466 467 468
	struct mddev *mddev = cb->data;
	md_wakeup_thread(mddev->thread);
	kfree(cb);
469
}
470
EXPORT_SYMBOL(md_unplug);
471

472
static inline struct mddev *mddev_get(struct mddev *mddev)
L
Linus Torvalds 已提交
473 474 475 476 477
{
	atomic_inc(&mddev->active);
	return mddev;
}

478
static void mddev_delayed_delete(struct work_struct *ws);
479

480
static void mddev_put(struct mddev *mddev)
L
Linus Torvalds 已提交
481
{
482 483
	struct bio_set *bs = NULL;

L
Linus Torvalds 已提交
484 485
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
486
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
487 488 489
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
490
		list_del_init(&mddev->all_mddevs);
491 492
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
493
		if (mddev->gendisk) {
T
Tejun Heo 已提交
494 495 496 497
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
498 499
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
T
Tejun Heo 已提交
500
			queue_work(md_misc_wq, &mddev->del_work);
501 502 503 504
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
505 506
	if (bs)
		bioset_free(bs);
L
Linus Torvalds 已提交
507 508
}

509
void mddev_init(struct mddev *mddev)
510 511 512 513 514 515 516 517 518 519 520 521 522 523 524
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
525
	mddev->reshape_backwards = 0;
526 527 528 529
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
530
EXPORT_SYMBOL_GPL(mddev_init);
531

532
static struct mddev * mddev_find(dev_t unit)
L
Linus Torvalds 已提交
533
{
534
	struct mddev *mddev, *new = NULL;
L
Linus Torvalds 已提交
535

536 537 538
	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

L
Linus Torvalds 已提交
539 540
 retry:
	spin_lock(&all_mddevs_lock);
541 542 543 544 545 546 547 548 549 550 551 552

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
L
Linus Torvalds 已提交
553
			spin_unlock(&all_mddevs_lock);
554 555
			new->hold_active = UNTIL_IOCTL;
			return new;
L
Linus Torvalds 已提交
556
		}
557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}
				
			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
L
Linus Torvalds 已提交
585 586 587 588 589 590
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

591
	new = kzalloc(sizeof(*new), GFP_KERNEL);
L
Linus Torvalds 已提交
592 593 594 595 596 597 598 599 600
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

601
	mddev_init(new);
L
Linus Torvalds 已提交
602 603 604 605

	goto retry;
}

606
static inline int mddev_lock(struct mddev * mddev)
L
Linus Torvalds 已提交
607
{
608
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
L
Linus Torvalds 已提交
609 610
}

611
static inline int mddev_is_locked(struct mddev *mddev)
D
Dan Williams 已提交
612 613 614 615
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

616
static inline int mddev_trylock(struct mddev * mddev)
L
Linus Torvalds 已提交
617
{
618
	return mutex_trylock(&mddev->reconfig_mutex);
L
Linus Torvalds 已提交
619 620
}

621 622
static struct attribute_group md_redundancy_group;

623
static void mddev_unlock(struct mddev * mddev)
L
Linus Torvalds 已提交
624
{
625
	if (mddev->to_remove) {
626 627 628 629
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
630 631 632 633 634 635 636
		 * So hold set sysfs_active while the remove in happeing,
		 * and anything else which might set ->to_remove or my
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
637
		 */
638 639
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
640
		mddev->sysfs_active = 1;
641 642
		mutex_unlock(&mddev->reconfig_mutex);

N
NeilBrown 已提交
643 644 645 646 647 648 649 650 651 652
		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
653
		}
654
		mddev->sysfs_active = 0;
655 656
	} else
		mutex_unlock(&mddev->reconfig_mutex);
L
Linus Torvalds 已提交
657

C
Chris Dunlop 已提交
658 659
	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
660 661
	 */
	spin_lock(&pers_lock);
662
	md_wakeup_thread(mddev->thread);
663
	spin_unlock(&pers_lock);
L
Linus Torvalds 已提交
664 665
}

666
static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
L
Linus Torvalds 已提交
667
{
668
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
669

N
NeilBrown 已提交
670
	rdev_for_each(rdev, mddev)
L
Linus Torvalds 已提交
671 672
		if (rdev->desc_nr == nr)
			return rdev;
673

L
Linus Torvalds 已提交
674 675 676
	return NULL;
}

677 678 679 680 681 682 683 684 685 686 687 688
static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
L
Linus Torvalds 已提交
689
{
690
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
691

N
NeilBrown 已提交
692
	rdev_for_each(rdev, mddev)
L
Linus Torvalds 已提交
693 694
		if (rdev->bdev->bd_dev == dev)
			return rdev;
695

L
Linus Torvalds 已提交
696 697 698
	return NULL;
}

699 700 701 702 703 704 705 706 707 708 709
static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

710
static struct md_personality *find_pers(int level, char *clevel)
711
{
712
	struct md_personality *pers;
713 714
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
715
			return pers;
716 717 718
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
719 720 721
	return NULL;
}

722
/* return the offset of the super block in 512byte sectors */
723
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
L
Linus Torvalds 已提交
724
{
725
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
726
	return MD_NEW_SIZE_SECTORS(num_sectors);
L
Linus Torvalds 已提交
727 728
}

729
static int alloc_disk_sb(struct md_rdev * rdev)
L
Linus Torvalds 已提交
730 731 732 733 734 735 736
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
737
		return -ENOMEM;
L
Linus Torvalds 已提交
738 739 740 741 742
	}

	return 0;
}

743
void md_rdev_clear(struct md_rdev *rdev)
L
Linus Torvalds 已提交
744 745
{
	if (rdev->sb_page) {
746
		put_page(rdev->sb_page);
L
Linus Torvalds 已提交
747 748
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
749
		rdev->sb_start = 0;
750
		rdev->sectors = 0;
L
Linus Torvalds 已提交
751
	}
752 753 754 755
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
756 757
	kfree(rdev->badblocks.page);
	rdev->badblocks.page = NULL;
L
Linus Torvalds 已提交
758
}
759
EXPORT_SYMBOL_GPL(md_rdev_clear);
L
Linus Torvalds 已提交
760

761
static void super_written(struct bio *bio, int error)
762
{
763
	struct md_rdev *rdev = bio->bi_private;
764
	struct mddev *mddev = rdev->mddev;
765

766 767 768 769
	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
770
		md_error(mddev, rdev);
771
	}
772

773 774
	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
N
Neil Brown 已提交
775
	bio_put(bio);
776 777
}

778
void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
779 780 781 782 783 784 785 786
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
787
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
788

789
	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
790 791 792 793
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
794

795
	atomic_inc(&mddev->pending_writes);
796
	submit_bio(WRITE_FLUSH_FUA, bio);
797 798
}

799
void md_super_wait(struct mddev *mddev)
800
{
T
Tejun Heo 已提交
801
	/* wait for all superblock writes that were scheduled to complete */
802 803 804 805 806 807 808 809
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
810 811
}

812
static void bi_complete(struct bio *bio, int error)
L
Linus Torvalds 已提交
813 814 815 816
{
	complete((struct completion*)bio->bi_private);
}

817
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
J
Jonathan Brassow 已提交
818
		 struct page *page, int rw, bool metadata_op)
L
Linus Torvalds 已提交
819
{
820
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
L
Linus Torvalds 已提交
821 822 823
	struct completion event;
	int ret;

J
Jens Axboe 已提交
824
	rw |= REQ_SYNC;
L
Linus Torvalds 已提交
825

826 827
	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
J
Jonathan Brassow 已提交
828 829
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
830 831 832 833
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_sector = sector + rdev->new_data_offset;
J
Jonathan Brassow 已提交
834 835
	else
		bio->bi_sector = sector + rdev->data_offset;
L
Linus Torvalds 已提交
836 837 838 839 840 841 842 843 844 845 846
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
847
EXPORT_SYMBOL_GPL(sync_page_io);
L
Linus Torvalds 已提交
848

849
static int read_disk_sb(struct md_rdev * rdev, int size)
L
Linus Torvalds 已提交
850 851 852 853 854 855 856 857 858 859
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


J
Jonathan Brassow 已提交
860
	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
L
Linus Torvalds 已提交
861 862 863 864 865 866 867 868 869 870 871 872
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
A
Andre Noll 已提交
873 874 875 876
	return 	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
L
Linus Torvalds 已提交
877 878 879 880 881 882 883 884 885 886 887 888
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
889
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
L
Linus Torvalds 已提交
890 891 892 893 894 895 896 897 898 899 900 901
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

A
Andre Noll 已提交
902
	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
L
Linus Torvalds 已提交
903
abort:
904 905
	kfree(tmp1);
	kfree(tmp2);
L
Linus Torvalds 已提交
906 907 908
	return ret;
}

909 910 911 912 913 914 915

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

L
Linus Torvalds 已提交
916 917
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
918 919 920
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
L
Linus Torvalds 已提交
921 922 923 924
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
L
Linus Torvalds 已提交
942
	sb->sb_csum = disk_csum;
943
#endif
L
Linus Torvalds 已提交
944 945 946 947 948 949 950 951 952 953 954 955
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
956
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
L
Linus Torvalds 已提交
957 958 959 960 961 962 963 964 965
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
966
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
L
Linus Torvalds 已提交
967 968 969 970 971
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
972
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
L
Linus Torvalds 已提交
973 974 975 976 977 978
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
979 980
	char		    *name;
	struct module	    *owner;
981 982
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
983
					  int minor_version);
984 985 986 987
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
988
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
989
						sector_t num_sectors);
990 991
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
L
Linus Torvalds 已提交
992 993
};

994 995 996 997 998 999 1000 1001
/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
1002
int md_check_no_bitmap(struct mddev *mddev)
1003
{
1004
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1005 1006 1007 1008 1009 1010 1011
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

L
Linus Torvalds 已提交
1012 1013 1014
/*
 * load_super for 0.90.0 
 */
1015
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
L
Linus Torvalds 已提交
1016 1017 1018 1019 1020 1021
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
1022
	 * Calculate the position of the superblock (512byte sectors),
L
Linus Torvalds 已提交
1023 1024 1025 1026
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
1027
	rdev->sb_start = calc_dev_sboffset(rdev);
L
Linus Torvalds 已提交
1028

1029
	ret = read_disk_sb(rdev, MD_SB_BYTES);
L
Linus Torvalds 已提交
1030 1031 1032 1033 1034
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
1035
	sb = page_address(rdev->sb_page);
L
Linus Torvalds 已提交
1036 1037 1038 1039 1040 1041 1042 1043

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
1044 1045
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
L
Linus Torvalds 已提交
1046 1047 1048 1049 1050 1051 1052 1053 1054
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

1055
	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
L
Linus Torvalds 已提交
1056 1057 1058 1059 1060 1061 1062
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
1063
	rdev->new_data_offset = 0;
1064
	rdev->sb_size = MD_SB_BYTES;
1065
	rdev->badblocks.shift = -1;
L
Linus Torvalds 已提交
1066 1067 1068 1069 1070 1071

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

1072
	if (!refdev) {
L
Linus Torvalds 已提交
1073
		ret = 1;
1074
	} else {
L
Linus Torvalds 已提交
1075
		__u64 ev1, ev2;
1076
		mdp_super_t *refsb = page_address(refdev->sb_page);
L
Linus Torvalds 已提交
1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else 
			ret = 0;
	}
1095
	rdev->sectors = rdev->sb_start;
1096 1097 1098 1099 1100
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1101
		rdev->sectors = (2ULL << 32) - 2;
L
Linus Torvalds 已提交
1102

1103
	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1104 1105 1106
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

L
Linus Torvalds 已提交
1107 1108 1109 1110 1111 1112 1113
 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
1114
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
L
Linus Torvalds 已提交
1115 1116
{
	mdp_disk_t *desc;
1117
	mdp_super_t *sb = page_address(rdev->sb_page);
1118
	__u64 ev1 = md_event(sb);
L
Linus Torvalds 已提交
1119

1120
	rdev->raid_disk = -1;
1121 1122 1123 1124
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

L
Linus Torvalds 已提交
1125 1126 1127 1128
	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
1129
		mddev->external = 0;
1130
		mddev->chunk_sectors = sb->chunk_size >> 9;
L
Linus Torvalds 已提交
1131 1132 1133
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
1134
		mddev->clevel[0] = 0;
L
Linus Torvalds 已提交
1135 1136
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
1137
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
1138
		mddev->events = ev1;
1139
		mddev->bitmap_info.offset = 0;
1140 1141
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
1142
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1143
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1144
		mddev->reshape_backwards = 0;
L
Linus Torvalds 已提交
1145

1146 1147 1148 1149 1150
		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
1151
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
1152 1153
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
1154 1155 1156 1157 1158
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
1159
			mddev->new_chunk_sectors = mddev->chunk_sectors;
1160 1161
		}

L
Linus Torvalds 已提交
1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi && 
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;
1178 1179

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1180
		    mddev->bitmap_info.file == NULL) {
1181 1182
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
1183 1184 1185
			mddev->bitmap_info.space =
				mddev->bitmap_info.space;
		}
1186

1187
	} else if (mddev->pers == NULL) {
1188 1189
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
L
Linus Torvalds 已提交
1190
		++ev1;
1191 1192 1193 1194
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events) 
				return -EINVAL;
1195 1196 1197 1198 1199 1200
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
1201 1202 1203 1204 1205
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
1206

L
Linus Torvalds 已提交
1207 1208 1209 1210
	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
1211
			set_bit(Faulty, &rdev->flags);
1212 1213
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
1214
			set_bit(In_sync, &rdev->flags);
L
Linus Torvalds 已提交
1215
			rdev->raid_disk = desc->raid_disk;
1216 1217 1218 1219 1220 1221 1222 1223
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
L
Linus Torvalds 已提交
1224
		}
1225 1226
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
1227
	} else /* MULTIPATH are always insync */
1228
		set_bit(In_sync, &rdev->flags);
L
Linus Torvalds 已提交
1229 1230 1231 1232 1233 1234
	return 0;
}

/*
 * sync_super for 0.90.0
 */
1235
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
L
Linus Torvalds 已提交
1236 1237
{
	mdp_super_t *sb;
1238
	struct md_rdev *rdev2;
L
Linus Torvalds 已提交
1239
	int next_spare = mddev->raid_disks;
1240

L
Linus Torvalds 已提交
1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

1255 1256
	rdev->sb_size = MD_SB_BYTES;

1257
	sb = page_address(rdev->sb_page);
L
Linus Torvalds 已提交
1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
A
Andre Noll 已提交
1272
	sb->size = mddev->dev_sectors / 2;
L
Linus Torvalds 已提交
1273 1274
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
1275
	sb->not_persistent = 0;
L
Linus Torvalds 已提交
1276 1277 1278 1279 1280
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

1281 1282 1283 1284 1285 1286 1287 1288
	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
1289
		sb->new_chunk = mddev->new_chunk_sectors << 9;
1290 1291
	}
	mddev->minor_version = sb->minor_version;
L
Linus Torvalds 已提交
1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
1303
	sb->chunk_size = mddev->chunk_sectors << 9;
L
Linus Torvalds 已提交
1304

1305
	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1306 1307
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

L
Linus Torvalds 已提交
1308
	sb->disks[0].state = (1<<MD_DISK_REMOVED);
N
NeilBrown 已提交
1309
	rdev_for_each(rdev2, mddev) {
L
Linus Torvalds 已提交
1310
		mdp_disk_t *d;
1311
		int desc_nr;
1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
1325
			desc_nr = rdev2->raid_disk;
L
Linus Torvalds 已提交
1326
		else
1327
			desc_nr = next_spare++;
1328
		rdev2->desc_nr = desc_nr;
L
Linus Torvalds 已提交
1329 1330 1331 1332 1333
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
1334
		if (is_active)
L
Linus Torvalds 已提交
1335 1336 1337
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
1338
		if (test_bit(Faulty, &rdev2->flags))
L
Linus Torvalds 已提交
1339
			d->state = (1<<MD_DISK_FAULTY);
1340
		else if (is_active) {
L
Linus Torvalds 已提交
1341
			d->state = (1<<MD_DISK_ACTIVE);
1342 1343
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
L
Linus Torvalds 已提交
1344 1345 1346 1347 1348 1349 1350
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
1351 1352
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
L
Linus Torvalds 已提交
1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

1375 1376 1377 1378
/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
1379
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1380
{
A
Andre Noll 已提交
1381
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1382
		return 0; /* component must fit device */
1383
	if (rdev->mddev->bitmap_info.offset)
1384
		return 0; /* can't move bitmap */
1385
	rdev->sb_start = calc_dev_sboffset(rdev);
1386 1387
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
1388 1389 1390
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
1391
	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1392
		num_sectors = (2ULL << 32) - 2;
1393
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1394 1395
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
1396
	return num_sectors;
1397 1398
}

1399 1400 1401 1402 1403 1404
static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}
1405

L
Linus Torvalds 已提交
1406 1407 1408 1409
/*
 * version 1 superblock
 */

1410
static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
L
Linus Torvalds 已提交
1411
{
1412 1413
	__le32 disk_csum;
	u32 csum;
L
Linus Torvalds 已提交
1414 1415
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
1416
	__le32 *isuper = (__le32*)sb;
L
Linus Torvalds 已提交
1417 1418 1419 1420 1421 1422 1423 1424 1425
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
1426
		newcsum += le16_to_cpu(*(__le16*) isuper);
L
Linus Torvalds 已提交
1427 1428 1429 1430 1431 1432

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

1433 1434
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
1435
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
L
Linus Torvalds 已提交
1436 1437 1438
{
	struct mdp_superblock_1 *sb;
	int ret;
1439
	sector_t sb_start;
1440
	sector_t sectors;
L
Linus Torvalds 已提交
1441
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1442
	int bmask;
L
Linus Torvalds 已提交
1443 1444

	/*
1445
	 * Calculate the position of the superblock in 512byte sectors.
L
Linus Torvalds 已提交
1446 1447 1448 1449 1450 1451 1452 1453
	 * It is always aligned to a 4K boundary and
	 * depeding on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
1454
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1455 1456
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
L
Linus Torvalds 已提交
1457 1458
		break;
	case 1:
1459
		sb_start = 0;
L
Linus Torvalds 已提交
1460 1461
		break;
	case 2:
1462
		sb_start = 8;
L
Linus Torvalds 已提交
1463 1464 1465 1466
		break;
	default:
		return -EINVAL;
	}
1467
	rdev->sb_start = sb_start;
L
Linus Torvalds 已提交
1468

1469 1470 1471 1472
	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
L
Linus Torvalds 已提交
1473 1474 1475
	if (ret) return ret;


1476
	sb = page_address(rdev->sb_page);
L
Linus Torvalds 已提交
1477 1478 1479 1480

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1481
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1482
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
L
Linus Torvalds 已提交
1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
1495 1496 1497 1498 1499
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;
1500

L
Linus Torvalds 已提交
1501 1502
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
1503 1504 1505 1506
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1507
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
L
Linus Torvalds 已提交
1508

1509
	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1510
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1511
	if (rdev->sb_size & bmask)
1512 1513 1514
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
1515
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
1516
		return -EINVAL;
1517 1518 1519
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
1520

1521 1522 1523 1524 1525
	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566
	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset == 0)
		rdev->badblocks.shift = -1;

1567
	if (!refdev) {
1568
		ret = 1;
1569
	} else {
L
Linus Torvalds 已提交
1570
		__u64 ev1, ev2;
1571
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
L
Linus Torvalds 已提交
1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
1587 1588 1589
			ret = 1;
		else
			ret = 0;
L
Linus Torvalds 已提交
1590
	}
1591 1592 1593 1594 1595 1596
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
L
Linus Torvalds 已提交
1597
		return -EINVAL;
1598
	rdev->sectors = le64_to_cpu(sb->data_size);
1599
	return ret;
L
Linus Torvalds 已提交
1600 1601
}

1602
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
L
Linus Torvalds 已提交
1603
{
1604
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1605
	__u64 ev1 = le64_to_cpu(sb->events);
L
Linus Torvalds 已提交
1606

1607
	rdev->raid_disk = -1;
1608 1609 1610 1611
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

L
Linus Torvalds 已提交
1612 1613 1614
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
1615
		mddev->external = 0;
1616
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
L
Linus Torvalds 已提交
1617 1618 1619
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
1620
		mddev->clevel[0] = 0;
L
Linus Torvalds 已提交
1621 1622
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
A
Andre Noll 已提交
1623
		mddev->dev_sectors = le64_to_cpu(sb->size);
1624
		mddev->events = ev1;
1625
		mddev->bitmap_info.offset = 0;
1626 1627 1628 1629
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
1630
		mddev->bitmap_info.default_offset = 1024 >> 9;
1631
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
1632 1633
		mddev->reshape_backwards = 0;

L
Linus Torvalds 已提交
1634 1635 1636 1637
		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;
1638

1639
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1640
		    mddev->bitmap_info.file == NULL) {
1641 1642
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}
1657

1658 1659 1660 1661 1662
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
1663
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1664 1665 1666 1667 1668
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
1669 1670 1671 1672 1673
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
1674
			mddev->new_chunk_sectors = mddev->chunk_sectors;
1675 1676
		}

1677
	} else if (mddev->pers == NULL) {
1678 1679
		/* Insist of good event counter while assembling, except for
		 * spares (which don't need an event count) */
L
Linus Torvalds 已提交
1680
		++ev1;
1681 1682 1683 1684 1685
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
1686 1687 1688 1689 1690 1691
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
1692 1693 1694 1695 1696
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
1699 1700 1701 1702 1703 1704
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
1709
			set_bit(Faulty, &rdev->flags);
			break;
		default:
1712 1713 1714 1715 1716
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
1720 1721
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
1722 1723
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
1724
	} else /* MULTIPATH are always insync */
1725
		set_bit(In_sync, &rdev->flags);
1726

	return 0;
}

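/*
 * super_1_sync(): rebuild the in-memory v1.x superblock image for @rdev
 * from the current state of @mddev (event count, geometry, bitmap offset,
 * reshape status, bad-block log and the per-device role table) so it is
 * ready to be written out.
 */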
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

1751
	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1752

1753
	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
1755
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1756 1757
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
1758

1759 1760 1761 1762
	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
1763 1764
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);
1765

1766 1767
	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1768
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1769
	}
1770 1771

	if (rdev->raid_disk >= 0 &&
1772
	    !test_bit(In_sync, &rdev->flags)) {
1773 1774 1775 1776
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
1777
	}
1778 1779 1780
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);
1781

1782 1783 1784 1785 1786 1787
	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
1788
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1789 1790 1791 1792
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1793 1794 1795 1796 1797 1798
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
1799
	}
1800

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = *p++;
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				*bbp++ = cpu_to_le64(store_bb);
			}
1825
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;
1839

1840 1841
	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
1842
		sb->max_dev = cpu_to_le32(max_dev);
1843 1844 1845 1846
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
1847 1848 1849
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);
	
	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
1855
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
1857
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1859
		else if (rdev2->raid_disk >= 0)
1860
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

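/*
 * super_1_rdev_size_change(): resize the component data area of @rdev.
 * For 1.1/1.2 layouts (superblock before the data) only the data size
 * changes; for 1.0 the superblock is relocated towards the new end of
 * the device.  Returns the resulting size in sectors, or 0 if the
 * change is not possible.
 */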
static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1874
		return 0; /* component must fit device */
1875 1876
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
1877
	if (rdev->sb_start < rdev->data_offset) {
1878
		/* minor versions 1 and 2; superblock before data */
1879
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1880 1881 1882
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
1883
	} else if (rdev->mddev->bitmap_info.offset) {
1884 1885 1886 1887
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
1888
		sector_t sb_start;
1889
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1890
		sb_start &= ~(sector_t)(4*2 - 1);
1891
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1892 1893
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
1894
		rdev->sb_start = sb_start;
1895
	}
1896
	sb = page_address(rdev->sb_page);
1897
	sb->data_size = cpu_to_le64(num_sectors);
1898
	sb->super_offset = rdev->sb_start;
1899
	sb->sb_csum = calc_sb_1_csum(sb);
1900
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1901 1902
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
1903
	return num_sectors;

}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
1932
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1933 1934 1935 1936 1937
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
1938
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
1944 1945 1946 1947
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
1948
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
1953 1954 1955 1956
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
1957
		.allow_new_offset   = super_1_allow_new_offset,
	},
};

1961
static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

1973
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
1975
	struct md_rdev *rdev, *rdev2;

1977 1978 1979
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
1980
			if (rdev->bdev->bd_contains ==
1981 1982
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
1983
				return 1;
1984 1985
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

1991 1992 1993 1994 1995 1996 1997
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
1998
int md_integrity_register(struct mddev *mddev)
1999
{
2000
	struct md_rdev *rdev, *reference = NULL;
2001 2002 2003

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
2004 2005
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
2022 2023
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
2024 2025 2026 2027 2028 2029 2030 2031 2032 2033
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
2034 2035 2036 2037 2038 2039
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
2040 2041 2042 2043 2044
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
2045
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
2047 2048 2049 2050 2051 2052 2053 2054
	struct blk_integrity *bi_rdev;
	struct blk_integrity *bi_mddev;

	if (!mddev->gendisk)
		return;

	bi_rdev = bdev_get_integrity(rdev->bdev);
	bi_mddev = blk_get_integrity(mddev->gendisk);

2056
	if (!bi_mddev) /* nothing to do */
		return;
2058
	if (rdev->raid_disk < 0) /* skip spares */
		return;
2060 2061 2062 2063 2064
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
2066
EXPORT_SYMBOL(md_integrity_add_rdev);

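/*
 * bind_rdev_to_array(): attach @rdev to @mddev - reject duplicates,
 * enforce the mddev->max_disks limit, pick or verify a unique desc_nr,
 * register the "dev-%s" kobject and sysfs links, and add the device to
 * mddev->disks under RCU.
 */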
static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
2079 2080 2081 2082 2083

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

2084 2085 2086
	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
2087 2088 2089 2090 2091 2092 2093 2094
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
2095
			mddev->dev_sectors = rdev->sectors;
2096
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
2112 2113 2114 2115 2116
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
2117
	bdevname(rdev->bdev,b);
2118
	while ( (s=strchr(b, '/')) != NULL)
2119
		*s = '!';
2120

	rdev->mddev = mddev;
2122
	printk(KERN_INFO "md: bind<%s>\n", b);
2123

2124
	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2125
		goto fail;
2126

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2131

2132
	list_add_rcu(&rdev->same_set, &mddev->disks);
2133
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2134 2135

	/* May as well allow recovery to be retried once */
2136
	mddev->recovery_disabled++;

	return 0;
2139 2140 2141 2142 2143

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

2146
static void md_delayed_delete(struct work_struct *ws)
2147
{
2148
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2149
	kobject_del(&rdev->kobj);
2150
	kobject_put(&rdev->kobj);
2151 2152
}

2153
static void unbind_rdev_from_array(struct md_rdev * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
2160
	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2161
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
2164
	sysfs_remove_link(&rdev->kobj, "block");
2165 2166
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
2167
	rdev->badblocks.count = 0;
2168
	/* We need to delay this, otherwise we can deadlock when
2169 2170
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
2171
	 */
2172
	synchronize_rcu();
2173 2174
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
2183
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

2189
	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2190
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}

2200
static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
2206
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

2211
static void export_rdev(struct md_rdev * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
2218
	md_rdev_clear(rdev);
#ifndef MODULE
2220 2221
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
2224
	kobject_put(&rdev->kobj);
}

2227
static void kick_rdev_from_array(struct md_rdev * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

2233
static void export_array(struct mddev *mddev)
{
2235
	struct md_rdev *rdev, *tmp;

	rdev_for_each_safe(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

2256
static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO 
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
2287
}

2289 2290 2291 2292 2293
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
2294
	printk(KERN_INFO
2295
	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
2296
	       "md:    Name: \"%s\" CT:%llu\n",
2297 2298
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
2299
		uuid,
2300 2301 2302 2303 2304
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
2305 2306
	printk(KERN_INFO
	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
2307
			" RO:%llu\n"
2308
	       "md:     Dev:%08x UUID: %pU\n"
2309 2310
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
2321
		uuid,
2322 2323 2324 2325 2326 2327 2328
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}

2331
static void print_rdev(struct md_rdev *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
2334 2335
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
2336 2337
	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	        rdev->desc_nr);
	if (rdev->sb_loaded) {
2339 2340 2341
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
2342
			print_sb_90(page_address(rdev->sb_page));
2343 2344
			break;
		case 1:
2345
			print_sb_1(page_address(rdev->sb_page));
2346 2347
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

2352
static void md_print_devices(void)
{
2354
	struct list_head *tmp;
2355
	struct md_rdev *rdev;
2356
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
2363
	for_each_mddev(mddev, tmp) {

2365 2366 2367 2368
		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, mddev)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		rdev_for_each(rdev, mddev)
2374
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}


2381
static void sync_sbs(struct mddev * mddev, int nospares)
{
2383 2384 2385 2386 2387 2388
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
2389
	struct md_rdev *rdev;
	rdev_for_each(rdev, mddev) {
2391 2392 2393 2394 2395 2396 2397
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
2398
			sync_super(mddev, rdev);
2399 2400
			rdev->sb_loaded = 1;
		}
	}
}

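/*
 * md_update_sb(): push the metadata out to every member device.  It
 * bumps (or, for a pure clean<->dirty transition, may roll back) the
 * event count, refreshes the per-rdev superblock images via sync_sbs(),
 * writes them together with any pending bad-block logs, and repeats if
 * the array changed while the write was in flight.  Waiters on
 * mddev->sb_wait are woken when the update is complete.
 */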
static void md_update_sb(struct mddev * mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;

repeat:
2412
	/* First make sure individual recovery_offsets are correct */
	rdev_for_each(rdev, mddev) {
2414 2415 2416 2417 2418 2419 2420
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
				rdev->recovery_offset = mddev->curr_resync_completed;

	}	
2421
	if (!mddev->persistent) {
2422
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2423
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2424
		if (!mddev->external) {
2425
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			rdev_for_each(rdev, mddev) {
2427
				if (rdev->badblocks.changed) {
2428
					rdev->badblocks.changed = 0;
2429 2430 2431 2432 2433 2434 2435 2436
					md_ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
2437 2438 2439 2440
		wake_up(&mddev->sb_wait);
		return;
	}

2441
	spin_lock_irq(&mddev->write_lock);
2442

2443 2444
	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
2456 2457 2458 2459 2460 2461 2462 2463 2464
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have a event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
2465
		nospares = 0;
2466

2467
	sync_req = mddev->in_sync;
2468 2469 2470

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
2471
	if (nospares
2472
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2473 2474
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
2475
		mddev->events--;
2476 2477
		mddev->can_decrease_events = 0;
	} else {
2478 2479
		/* otherwise we have to go forward and ... */
		mddev->events ++;
2480
		mddev->can_decrease_events = nospares;
2481
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
2492

	rdev_for_each(rdev, mddev) {
2494 2495
		if (rdev->badblocks.changed)
			any_badblocks_changed++;
2496 2497 2498
		if (test_bit(Faulty, &rdev->flags))
			set_bit(FaultRecorded, &rdev->flags);
	}
2499

2500
	sync_sbs(mddev, nospares);
2501
	spin_unlock_irq(&mddev->write_lock);

2503 2504
	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
		 mdname(mddev), mddev->in_sync);

2506
	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];
2509

2510 2511
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */

2513 2514
		if (!test_bit(Faulty, &rdev->flags) &&
		    rdev->saved_raid_disk == -1) {
2515
			md_super_write(mddev,rdev,
2516
				       rdev->sb_start, rdev->sb_size,
2517
				       rdev->sb_page);
2518 2519 2520
			pr_debug("md: (write) %s's sb offset: %llu\n",
				 bdevname(rdev->bdev, b),
				 (unsigned long long)rdev->sb_start);
2521
			rdev->sb_events = mddev->events;
			if (rdev->badblocks.size) {
				md_super_write(mddev, rdev,
					       rdev->badblocks.sector,
					       rdev->badblocks.size << 9,
					       rdev->bb_page);
				rdev->badblocks.size = 0;
			}
2529

2530
		} else if (test_bit(Faulty, &rdev->flags))
2531 2532
			pr_debug("md: %s (skipping faulty)\n",
				 bdevname(rdev->bdev, b));
2533 2534 2535
		else
			pr_debug("(skipping incremental s/r ");

2536
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
2540
	md_super_wait(mddev);
2541
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2542

2543
	spin_lock_irq(&mddev->write_lock);
2544 2545
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2546
		/* have to write it out again */
2547
		spin_unlock_irq(&mddev->write_lock);
2548 2549
		goto repeat;
	}
2550
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2551
	spin_unlock_irq(&mddev->write_lock);
2552
	wake_up(&mddev->sb_wait);
2553 2554
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2555

	rdev_for_each(rdev, mddev) {
2557 2558 2559 2560
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
2561
			md_ack_all_badblocks(&rdev->badblocks);
2562 2563 2564
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
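/*
 * Example of the matching rule above (illustrative only):
 * cmd_match("faulty\n", "faulty") and cmd_match("faulty", "faulty")
 * both return 1, while cmd_match("fault", "faulty") returns 0.
 */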

struct rdev_sysfs_entry {
	struct attribute attr;
2589 2590
	ssize_t (*show)(struct md_rdev *, char *);
	ssize_t (*store)(struct md_rdev *, const char *, size_t);
2591 2592 2593
};

static ssize_t
2594
state_show(struct md_rdev *rdev, char *page)
2595 2596
{
	char *sep = "";
2597
	size_t len = 0;
2598

2599 2600
	if (test_bit(Faulty, &rdev->flags) ||
	    rdev->badblocks.unacked_exist) {
2601 2602 2603
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
2604
	if (test_bit(In_sync, &rdev->flags)) {
2605 2606 2607
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
2608 2609 2610 2611
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
2612
	if (test_bit(Blocked, &rdev->flags) ||
2613 2614
	    (rdev->badblocks.unacked_exist
	     && !test_bit(Faulty, &rdev->flags))) {
2615 2616 2617
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
2618 2619
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
2620 2621 2622
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
2623 2624 2625 2626
	if (test_bit(WriteErrorSeen, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_error", sep);
		sep = ",";
	}
	if (test_bit(WantReplacement, &rdev->flags)) {
		len += sprintf(page+len, "%swant_replacement", sep);
		sep = ",";
	}
	if (test_bit(Replacement, &rdev->flags)) {
		len += sprintf(page+len, "%sreplacement", sep);
		sep = ",";
	}

2636 2637 2638
	return len+sprintf(page+len, "\n");
}

2639
static ssize_t
2640
state_store(struct md_rdev *rdev, const char *buf, size_t len)
2641 2642
{
	/* can write
2643
	 *  faulty  - simulates an error
2644
	 *  remove  - disconnects the device
2645 2646
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
2647 2648
	 *  blocked - sets the Blocked flags
	 *  -blocked - clears the Blocked and possibly simulates an error
2649
	 *  insync - sets Insync providing device isn't active
2650 2651
	 *  write_error - sets WriteErrorSeen
	 *  -write_error - clears WriteErrorSeen
2652 2653 2654 2655
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
2656 2657 2658 2659
		if (test_bit(Faulty, &rdev->flags))
			err = 0;
		else
			err = -EBUSY;
2660 2661 2662 2663
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
2664
			struct mddev *mddev = rdev->mddev;
2665
			kick_rdev_from_array(rdev);
2666 2667
			if (mddev->pers)
				md_update_sb(mddev, 1);
2668 2669 2670
			md_new_event(mddev);
			err = 0;
		}
2671 2672 2673 2674 2675
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
2676 2677 2678 2679 2680
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
2681
		if (!test_bit(Faulty, &rdev->flags) &&
2682
		    rdev->badblocks.unacked_exist) {
2683 2684 2685 2686 2687
			/* metadata handler doesn't understand badblocks,
			 * so we need to fail the device
			 */
			md_error(rdev->mddev, rdev);
		}
2688
		clear_bit(Blocked, &rdev->flags);
2689
		clear_bit(BlockedBadBlocks, &rdev->flags);
2690 2691 2692 2693
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

2694 2695 2696
		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
2697
		err = 0;
2698 2699 2700 2701 2702 2703
	} else if (cmd_match(buf, "write_error")) {
		set_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-write_error")) {
		clear_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "want_replacement")) {
		/* Any non-spare device that is not a replacement can
		 * become want_replacement at any time, but we then need to
		 * check if recovery is needed.
		 */
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Replacement, &rdev->flags))
			set_bit(WantReplacement, &rdev->flags);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
		err = 0;
	} else if (cmd_match(buf, "-want_replacement")) {
		/* Clearing 'want_replacement' is always allowed.
		 * Once replacements starts it is too late though.
		 */
		err = 0;
		clear_bit(WantReplacement, &rdev->flags);
	} else if (cmd_match(buf, "replacement")) {
		/* Can only set a device as a replacement when array has not
		 * yet been started.  Once running, replacement is automatic
		 * from spares, or by assigning 'slot'.
		 */
		if (rdev->mddev->pers)
			err = -EBUSY;
		else {
			set_bit(Replacement, &rdev->flags);
			err = 0;
		}
	} else if (cmd_match(buf, "-replacement")) {
		/* Similarly, can only clear Replacement before start */
		if (rdev->mddev->pers)
			err = -EBUSY;
		else {
			clear_bit(Replacement, &rdev->flags);
			err = 0;
		}
2740
	}
	if (!err)
		sysfs_notify_dirent_safe(rdev->sysfs_state);
2743 2744
	return err ? err : len;
}
2745 2746
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
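/*
 * Illustrative use from user space (device names are examples only):
 *
 *   echo faulty > /sys/block/md0/md/dev-sdb1/state
 *   echo remove > /sys/block/md0/md/dev-sdb1/state
 *
 * marks a member device as failed and then detaches it from the array.
 */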

2748
static ssize_t
2749
errors_show(struct md_rdev *rdev, char *page)
2750 2751 2752 2753 2754
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
2755
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2756 2757 2758 2759 2760 2761 2762 2763 2764 2765
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
2766
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2767

2768
static ssize_t
2769
slot_show(struct md_rdev *rdev, char *page)
2770 2771 2772 2773 2774 2775 2776 2777
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
2778
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2779 2780
{
	char *e;
2781
	int err;
2782 2783 2784 2785 2786
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
2787
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
2798
		if (rdev->mddev->pers->hot_remove_disk == NULL)
2799 2800
			return -EINVAL;
		err = rdev->mddev->pers->
2801
			hot_remove_disk(rdev->mddev, rdev);
2802 2803
		if (err)
			return err;
2804
		sysfs_unlink_rdev(rdev->mddev, rdev);
2805
		rdev->raid_disk = -1;
2806 2807
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
2808 2809
	} else if (rdev->mddev->pers) {
		/* Activating a spare .. or possibly reactivating
2810
		 * if we ever get bitmaps working here.
2811 2812 2813 2814 2815
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

2816 2817 2818
		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
			return -EBUSY;

2819 2820 2821
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

2822 2823 2824 2825
		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;

2826 2827 2828 2829 2830
		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
2831
		clear_bit(In_sync, &rdev->flags);
2832 2833
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
2834
		if (err) {
2835 2836
			rdev->raid_disk = -1;
			return err;
2837
		} else
			sysfs_notify_dirent_safe(rdev->sysfs_state);
2839
		if (sysfs_link_rdev(rdev->mddev, rdev))
			/* failure here is OK */;
2841
		/* don't wakeup anyone, leave that to userspace. */
2842
	} else {
2843 2844
		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2845 2846 2847
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
2848 2849
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
2850
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
2852
	}
2853 2854 2855 2856 2857
	return len;
}


static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
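/*
 * Illustrative use (device names are examples only): on an array with
 * externally managed metadata,
 *
 *   echo none > /sys/block/md0/md/dev-sdb1/slot
 *
 * releases the slot of a failed/spare member, while writing a number
 * assigns that slot.
 */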

2860
static ssize_t
2861
offset_show(struct md_rdev *rdev, char *page)
2862
{
2863
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2864 2865 2866
}

static ssize_t
2867
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2868
{
2869 2870
	unsigned long long offset;
	if (strict_strtoull(buf, 10, &offset) < 0)
2871
		return -EINVAL;
2872
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
2873
		return -EBUSY;
2874
	if (rdev->sectors && rdev->mddev->external)
2875 2876 2877
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
2878
	rdev->data_offset = offset;
2879
	rdev->new_data_offset = offset;
2880 2881 2882 2883
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
2884
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2885

static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)rdev->new_data_offset);
}

static ssize_t new_offset_store(struct md_rdev *rdev,
				const char *buf, size_t len)
{
	unsigned long long new_offset;
	struct mddev *mddev = rdev->mddev;

	if (strict_strtoull(buf, 10, &new_offset) < 0)
		return -EINVAL;

	if (mddev->sync_thread)
		return -EBUSY;
	if (new_offset == rdev->data_offset)
		/* reset is always permitted */
		;
	else if (new_offset > rdev->data_offset) {
		/* must not push array size beyond rdev_sectors */
		if (new_offset - rdev->data_offset
		    + mddev->dev_sectors > rdev->sectors)
				return -E2BIG;
	}
	/* Metadata worries about other space details. */

	/* decreasing the offset is inconsistent with a backwards
	 * reshape.
	 */
	if (new_offset < rdev->data_offset &&
	    mddev->reshape_backwards)
		return -EINVAL;
	/* Increasing offset is inconsistent with forwards
	 * reshape.  reshape_direction should be set to
	 * 'backwards' first.
	 */
	if (new_offset > rdev->data_offset &&
	    !mddev->reshape_backwards)
		return -EINVAL;

	if (mddev->pers && mddev->persistent &&
	    !super_types[mddev->major_version]
	    .allow_new_offset(rdev, new_offset))
		return -E2BIG;
	rdev->new_data_offset = new_offset;
	if (new_offset > rdev->data_offset)
		mddev->reshape_backwards = 1;
	else if (new_offset < rdev->data_offset)
		mddev->reshape_backwards = 0;

	return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

2943
static ssize_t
2944
rdev_size_show(struct md_rdev *rdev, char *page)
2945
{
2946
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2947 2948
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
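/*
 * Example (illustrative): overlaps(0, 100, 50, 100) is 1 because the
 * two ranges share sectors 50-99, while overlaps(0, 50, 50, 100) is 0.
 */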

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (strict_strtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}

2978
static ssize_t
2979
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2980
{
2981
	struct mddev *my_mddev = rdev->mddev;
2982
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;
2984

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
2987 2988
	if (rdev->data_offset != rdev->new_data_offset)
		return -EINVAL; /* too confusing */
2989
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
2991 2992 2993
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
2994
				return -EBUSY;
2995
		} else if (!sectors)
2996
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2997
				rdev->data_offset;
2998
	}
2999
	if (sectors < my_mddev->dev_sectors)
3000
		return -EINVAL; /* component must fit device */
3001

3002 3003
	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
3004 3005
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
3006
		 * a deadlock.  We have already changed rdev->sectors, and if
3007 3008
		 * we have to change it back, we will have the lock again.
		 */
3009
		struct mddev *mddev;
3010
		int overlap = 0;
3011
		struct list_head *tmp;
3012

3013
		mddev_unlock(my_mddev);
3014
		for_each_mddev(mddev, tmp) {
3015
			struct md_rdev *rdev2;
3016 3017

			mddev_lock(mddev);
			rdev_for_each(rdev2, mddev)
3019 3020 3021 3022 3023
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
3024 3025 3026 3027 3028 3029 3030 3031 3032
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
3033
		mddev_lock(my_mddev);
3034 3035 3036
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
3037
			 * We put oldsectors back because we *know* it is
3038 3039 3040
			 * safe, and trust userspace not to race with
			 * itself
			 */
3041
			rdev->sectors = oldsectors;
3042 3043 3044
			return -EBUSY;
		}
	}
3045 3046 3047 3048
	return len;
}

static struct rdev_sysfs_entry rdev_size =
3049
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3050

3051

3052
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3053 3054 3055 3056 3057 3058 3059 3060 3061 3062
{
	unsigned long long recovery_start = rdev->recovery_offset;

	if (test_bit(In_sync, &rdev->flags) ||
	    recovery_start == MaxSector)
		return sprintf(page, "none\n");

	return sprintf(page, "%llu\n", recovery_start);
}

3063
static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long recovery_start;

	if (cmd_match(buf, "none"))
		recovery_start = MaxSector;
	else if (strict_strtoull(buf, 10, &recovery_start))
		return -EINVAL;

	if (rdev->mddev->pers &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	rdev->recovery_offset = recovery_start;
	if (recovery_start == MaxSector)
		set_bit(In_sync, &rdev->flags);
	else
		clear_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);

3087 3088 3089 3090 3091 3092

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);

3093
static ssize_t bb_show(struct md_rdev *rdev, char *page)
3094 3095 3096
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
3097
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3098
{
3099 3100 3101 3102 3103
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
3104 3105 3106 3107 3108
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);


3109
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3110 3111 3112
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
3113
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3114 3115 3116 3117 3118 3119
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

3120 3121
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
3122
	&rdev_errors.attr,
3123
	&rdev_slot.attr,
3124
	&rdev_offset.attr,
3125
	&rdev_new_offset.attr,
3126
	&rdev_size.attr,
3127
	&rdev_recovery_start.attr,
3128 3129
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
3130 3131 3132 3133 3134 3135
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3136
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3137
	struct mddev *mddev = rdev->mddev;
3138
	ssize_t rv;
3139 3140 3141

	if (!entry->show)
		return -EIO;
3142 3143 3144 3145 3146 3147 3148 3149 3150 3151

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
3152 3153 3154 3155 3156 3157 3158
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3159
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3160
	ssize_t rv;
3161
	struct mddev *mddev = rdev->mddev;
3162 3163 3164

	if (!entry->store)
		return -EIO;
3165 3166
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
3167
	rv = mddev ? mddev_lock(mddev): -EBUSY;
3168
	if (!rv) {
3169 3170 3171 3172
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
3173
		mddev_unlock(mddev);
3174 3175
	}
	return rv;
3176 3177 3178 3179
}

static void rdev_free(struct kobject *ko)
{
3180
	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3181 3182
	kfree(rdev);
}
3183
static const struct sysfs_ops rdev_sysfs_ops = {
3184 3185 3186 3187 3188 3189 3190 3191 3192
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};

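/*
 * md_rdev_init(): set every field of a freshly allocated md_rdev to a
 * safe initial value and reserve the page used for the bad-block list.
 * Returns 0, or -ENOMEM if that page cannot be allocated.
 */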
int md_rdev_init(struct md_rdev *rdev)
{
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_events = 0;
	rdev->last_read_error.tv_sec  = 0;
	rdev->last_read_error.tv_nsec = 0;
	rdev->sb_loaded = 0;
	rdev->bb_page = NULL;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	/* Add space to store bad block list.
	 * This reserves the space even on arrays where it cannot
	 * be used - I wonder if that matters
	 */
	rdev->badblocks.count = 0;
	rdev->badblocks.shift = 0;
	rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	seqlock_init(&rdev->badblocks.lock);
	if (rdev->badblocks.page == NULL)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
3237
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
3241
	struct md_rdev *rdev;
	sector_t size;

3244
	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

3250 3251 3252 3253 3254
	err = md_rdev_init(rdev);
	if (err)
		goto abort_free;
	err = alloc_disk_sb(rdev);
	if (err)
		goto abort_free;

3257
	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

3261
	kobject_init(&rdev->kobj, &rdev_ktype);
3262

3263
	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING 
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
			       "superblock, not importing!\n",
				bdevname(rdev->bdev,b),
			       super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING 
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
3290 3291 3292
	if (super_format == -1)
		/* hot-add for 0.90, or non-persistent: so no badblocks */
		rdev->badblocks.shift = -1;
3293

L
Linus Torvalds 已提交
3294 3295 3296
	return rdev;

abort_free:
3297 3298
	if (rdev->bdev)
		unlock_rdev(rdev);
3299
	md_rdev_clear(rdev);
L
Linus Torvalds 已提交
3300 3301 3302 3303 3304 3305 3306 3307 3308
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


3309
static void analyze_sbs(struct mddev * mddev)
{
	int i;
3312
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk( KERN_ERR \
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n", 
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}


	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
3338 3339 3340
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
3341 3342 3343 3344 3345 3346 3347
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
3360
			set_bit(In_sync, &rdev->flags);
3361
		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
3362 3363
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
L
Linus Torvalds 已提交
3364 3365 3366 3367
		}
	}
}

3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408
/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale'.
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {
			unsigned int value;
			value = *cp - '0';
			result = result * 10 + value;
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;
	if (decimals < 0)
		decimals = 0;
	while (decimals < scale) {
		result *= 10;
		decimals ++;
	}
	*res = result;
	return 0;
}
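/*
 * Worked example (illustration only, derived from the loop above): with
 * scale == 3, strict_strtoul_scaled("0.2", &res, 3) stores 200 in *res and
 * strict_strtoul_scaled("2.5", &res, 3) stores 2500, so a value written in
 * seconds can be held internally in milliseconds without floating point.
 */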


static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
	unsigned long msec;

	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
		return -EINVAL;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
		if (mddev->safemode_delay < old_delay)
			md_safemode_timeout((unsigned long)mddev);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
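/*
 * Typical usage from user space (example only; the sysfs path assumes the
 * usual /sys/block/mdX/md/ layout): "echo 0.200 > safe_mode_delay" asks for
 * a 200 msec safemode delay, which safe_delay_store() converts to jiffies,
 * rounding a result of 0 jiffies up to 1.
 */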

static ssize_t
level_show(struct mddev *mddev, char *page)
{
	struct md_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
	char clevel[16];
	ssize_t rv = len;
	struct md_personality *pers;
	long level;
	void *priv;
	struct md_rdev *rdev;

	if (mddev->pers == NULL) {
		if (len == 0)
			return 0;
		if (len >= sizeof(mddev->clevel))
			return -ENOSPC;
		strncpy(mddev->clevel, buf, len);
		if (mddev->clevel[len-1] == '\n')
			len--;
		mddev->clevel[len] = 0;
		mddev->level = LEVEL_NONE;
		return rv;
	}

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will access other array.
	 */

	if (mddev->sync_thread ||
	    mddev->reshape_position != MaxSector ||
	    mddev->sysfs_active)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
		       mdname(mddev), mddev->pers->name);
		return -EINVAL;
	}

	/* Now find the new personality */
	if (len == 0 || len >= sizeof(clevel))
		return -EINVAL;
	strncpy(clevel, buf, len);
	if (clevel[len-1] == '\n')
		len--;
	clevel[len] = 0;
	if (strict_strtol(clevel, 10, &level))
		level = LEVEL_NONE;

	if (request_module("md-%s", clevel) != 0)
		request_module("md-level-%s", clevel);
	spin_lock(&pers_lock);
	pers = find_pers(level, clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);

	if (pers == mddev->pers) {
		/* Nothing to do! */
		module_put(pers->owner);
		return rv;
	}
	if (!pers->takeover) {
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
		       mdname(mddev), clevel);
		return -EINVAL;
	}

	rdev_for_each(rdev, mddev)
		rdev->new_raid_disk = rdev->raid_disk;

	/* ->takeover must set new_* and/or delta_disks
	 * if it succeeds, and may set them when it fails.
	 */
	priv = pers->takeover(mddev);
	if (IS_ERR(priv)) {
		mddev->new_level = mddev->level;
		mddev->new_layout = mddev->layout;
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->raid_disks -= mddev->delta_disks;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s would not accept array\n",
		       mdname(mddev), clevel);
		return PTR_ERR(priv);
	}

	/* Looks like we have a winner */
	mddev_suspend(mddev);
	mddev->pers->stop(mddev);
	
	if (mddev->pers->sync_request == NULL &&
	    pers->sync_request != NULL) {
		/* need to add the md_redundancy_group */
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
	}		
	if (mddev->pers->sync_request != NULL &&
	    pers->sync_request == NULL) {
		/* need to remove the md_redundancy_group */
		if (mddev->to_remove == NULL)
			mddev->to_remove = &md_redundancy_group;
	}

	if (mddev->pers->sync_request == NULL &&
	    mddev->external) {
		/* We are converting from a no-redundancy array
		 * to a redundancy array and metadata is managed
		 * externally so we need to be sure that writes
		 * won't block due to a need to transition
		 *      clean->dirty
		 * until external management is started.
		 */
		mddev->in_sync = 0;
		mddev->safemode_delay = 0;
		mddev->safemode = 0;
	}

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk >= mddev->raid_disks)
			rdev->new_raid_disk = -1;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		sysfs_unlink_rdev(mddev, rdev);
	}
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		rdev->raid_disk = rdev->new_raid_disk;
		if (rdev->raid_disk < 0)
			clear_bit(In_sync, &rdev->flags);
		else {
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING "md: cannot register rd%d"
				       " for %s after level change\n",
				       rdev->raid_disk, mdname(mddev));
		}
	}

	module_put(mddev->pers->owner);
	mddev->pers = pers;
	mddev->private = priv;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->degraded = 0;
	if (mddev->pers->sync_request == NULL) {
		/* this is now an array without redundancy, so
		 * it must always be in_sync
		 */
		mddev->in_sync = 1;
		del_timer_sync(&mddev->safemode_timer);
	}
	pers->run(mddev);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	mddev_resume(mddev);
	sysfs_notify(&mddev->kobj, NULL, "level");
	md_new_event(mddev);
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
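/*
 * Example (illustrative; the accepted transitions depend on which
 * personalities are loaded and on their ->takeover() methods): writing
 * "raid6" to the level attribute of an idle RAID5 array asks the raid6
 * personality to take the array over in place.
 */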


static ssize_t
layout_show(struct mddev *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_layout = n;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_layout = mddev->layout;
			return err;
		}
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);


static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(struct mddev *mddev, int raid_disks);

static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		struct md_rdev *rdev;
		int olddisks = mddev->raid_disks - mddev->delta_disks;

		rdev_for_each(rdev, mddev) {
			if (olddisks < n &&
			    rdev->data_offset < rdev->new_data_offset)
				return -EINVAL;
			if (olddisks > n &&
			    rdev->data_offset > rdev->new_data_offset)
				return -EINVAL;
		}
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
		mddev->reshape_backwards = (mddev->delta_disks < 0);
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);

static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_sectors != mddev->new_chunk_sectors)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_chunk_sectors << 9,
			       mddev->chunk_sectors << 9);
	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		mddev->new_chunk_sectors = n >> 9;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_chunk_sectors = mddev->chunk_sectors;
			return err;
		}
	} else {
		mddev->new_chunk_sectors = n >> 9;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_sectors = n >> 9;
	}
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);

static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return -EBUSY;
	if (cmd_match(buf, "none"))
		n = MaxSector;
	else if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);

/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };

static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
static int do_md_run(struct mddev * mddev);
static int restart_array(struct mddev *mddev);

static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		err = do_md_stop(mddev, 0, NULL);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers)
			err = do_md_stop(mddev, 2, NULL);
		else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = md_set_readonly(mddev, NULL);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = md_set_readonly(mddev, NULL);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		if (mddev->hold_active == UNTIL_IOCTL)
			mddev->hold_active = 0;
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);

static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
	return sprintf(page, "%d\n",
		       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&mddev->max_corr_read_errors, n);
		return len;
	}
	return -EINVAL;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
	max_corrected_read_errors_store);

static ssize_t
null_show(struct mddev *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;


	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
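/*
 * Example (hypothetical device numbers): "echo 8:16 > new_dev" imports the
 * block device with major 8, minor 16 (commonly /dev/sdb) and binds it to
 * this array, reading its superblock first if the array is persistent.
 */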

static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
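/*
 * Example (illustration only): "echo 0-127 > bitmap_set_bits" marks bitmap
 * chunks 0 through 127 dirty so the next resync rewrites them; single chunk
 * numbers and several whitespace-separated ranges can be mixed in one write.
 */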

static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
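/*
 * Example (illustration only): component_size is read and written in KiB,
 * so "echo 1048576 > component_size" requests 1 GiB per member device;
 * strict_blocks_to_sectors() converts the figure to 512-byte sectors.
 */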


/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
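/*
 * Examples (illustrative values): "echo 1.2 > metadata_version" selects
 * v1.2 superblocks on an array that has no devices yet, a string such as
 * "external:imsm" marks the metadata as externally managed, and "none"
 * makes the array non-persistent.
 */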

static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static void reap_sync_thread(struct mddev *mddev);

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)
		       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
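/*
 * Example (illustration only): "echo check > sync_action" starts a
 * read-only scrub (MD_RECOVERY_CHECK plus REQUESTED and SYNC), while
 * "repair" also rewrites inconsistent data; mismatches found either way
 * are reported through the mismatch_cnt attribute.
 */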


static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);

static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);

static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
	long n;

	if (strict_strtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);

static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
	unsigned long resync, dt, db;
	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);

static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (strict_strtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (strict_strtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);

static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_lo;

	if (mddev->pers == NULL || 
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_lo = new;
	if (new >= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);


static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_hi;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_hi = new;
	if (new <= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);

static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	struct md_rdev *rdev;
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);

static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n",
		       mddev->reshape_backwards ? "backwards" : "forwards");
}

static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
	int backwards = 0;
	if (cmd_match(buf, "forwards"))
		backwards = 0;
	else if (cmd_match(buf, "backwards"))
		backwards = 1;
	else
		return -EINVAL;
	if (mddev->reshape_backwards == backwards)
		return len;

	/* check if we are allowed to change */
	if (mddev->delta_disks)
		return -EBUSY;

	if (mddev->persistent &&
	    mddev->major_version == 0)
		return -EINVAL;

	mddev->reshape_backwards = backwards;
	return len;
}

static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
       reshape_direction_store);

static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	if (mddev->pers) {
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk(mddev->gendisk);
	}
	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};


static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);
	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		printk(KERN_DEBUG "pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}
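/*
 * Example (illustrative name; the parameter is conventionally exposed as
 * /sys/module/md_mod/parameters/new_array): writing "md_home" here creates
 * an array whose block device is named md_home instead of a numeric mdN.
 */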

static void md_safemode_timeout(unsigned long data)
{
	struct mddev *mddev = (struct mddev *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;

int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->meta_bdev) {
			/* Nothing to check */;
		} else if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	if (mddev->bio_set == NULL)
		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		struct md_rdev *rdev2;
		int warned = 0;

		rdev_for_each(rdev, mddev)
			rdev_for_each(rdev2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}

		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	/* may be over-ridden by personality */
	mddev->resync_max_sectors = mddev->dev_sectors;

	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (err)
		printk(KERN_ERR "md: pers->run() failed ...\n");
	else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
			  " but 'external_size' not in effect?\n", __func__);
		printk(KERN_ERR
		       "md: invalid array_size %llu > default size %llu\n",
		       (unsigned long long)mddev->array_sectors / 2,
		       (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
		err = -EINVAL;
		mddev->pers->stop(mddev);
	}
	if (err == 0 && mddev->pers->sync_request &&
	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->pers->sync_request) {
		if (mddev->kobj.sd &&
		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

 	atomic_set(&mddev->writes_pending,0);
	atomic_set(&mddev->max_corr_read_errors,
		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;
	smp_wmb();
	mddev->ready = 1;
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0)
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
	
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	
	if (mddev->flags)
		md_update_sb(mddev, 0);

	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	return 0;
}
EXPORT_SYMBOL_GPL(md_run);

static int do_md_run(struct mddev *mddev)
{
	int err;

	err = md_run(mddev);
	if (err)
		goto out;
	err = bitmap_load(mddev);
	if (err) {
		bitmap_destroy(mddev);
		goto out;
	}

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	mddev->changed = 1;
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
	return err;
}

static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
		mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

/* similar to deny_write_access, but accounts for our holding a reference
 * to the file ourselves */
static int deny_bitmap_write_access(struct file * file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	if (atomic_read(&inode->i_writecount) > 1) {
		spin_unlock(&inode->i_lock);
		return -ETXTBSY;
	}
	atomic_set(&inode->i_writecount, -1);
	spin_unlock(&inode->i_lock);

	return 0;
}

void restore_bitmap_write_access(struct file *file)
{
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	atomic_set(&inode->i_writecount, 1);
	spin_unlock(&inode->i_lock);
}

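/*
 * Reset all configuration fields of an mddev to their defaults so the
 * structure can be reused; called from do_md_stop() once the array has
 * been fully disassembled.
 */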
static void md_clean(struct mddev *mddev)
{
	mddev->array_sectors = 0;
	mddev->external_size = 0;
	mddev->dev_sectors = 0;
	mddev->raid_disks = 0;
	mddev->recovery_cp = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->reshape_position = MaxSector;
	mddev->external = 0;
	mddev->persistent = 0;
	mddev->level = LEVEL_NONE;
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->ro = 0;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
	mddev->layout = 0;
	mddev->max_disks = 0;
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	atomic64_set(&mddev->resync_mismatches, 0);
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->merge_check_needed = 0;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
}

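/*
 * Freeze and reap any resync/recovery thread, flush the bitmap and wait
 * for outstanding superblock writes, then mark the array clean if it was
 * dirty.  Callers hold the mddev lock (see md_stop_writes() below).
 */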
static void __md_stop_writes(struct mddev *mddev)
{
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	bitmap_flush(mddev);
	md_super_wait(mddev);

	if (!mddev->in_sync || mddev->flags) {
		/* mark array as shutdown cleanly */
		mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);

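/*
 * Tear down the personality of a stopped array: call its ->stop method
 * and drop the module reference.  Callers are expected to have stopped
 * writes and the sync thread first (see do_md_stop()).
 */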
void md_stop(struct mddev *mddev)
{
	mddev->ready = 0;
	mddev->pers->stop(mddev);
	if (mddev->pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(mddev->pers->owner);
	mddev->pers = NULL;
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
EXPORT_SYMBOL_GPL(md_stop);

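/*
 * Switch a running array to read-only (STOP_ARRAY_RO).  Fails with
 * -EBUSY if another opener still has the device, and with -ENXIO if
 * the array is already read-only.
 */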
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
	int err = 0;
	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > !!bdev) {
		printk("md: %s still in use.\n",mdname(mddev));
		err = -EBUSY;
		goto out;
	}
	if (bdev)
		sync_blockdev(bdev);
	if (mddev->pers) {
		__md_stop_writes(mddev);

		err  = -ENXIO;
		if (mddev->ro==1)
			goto out;
		mddev->ro = 1;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		err = 0;	
	}
out:
	mutex_unlock(&mddev->open_mutex);
	return err;
}

/* mode:
 *   0 - completely stop and disassemble array
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(struct mddev * mddev, int mode,
		      struct block_device *bdev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;

	mutex_lock(&mddev->open_mutex);
	if (atomic_read(&mddev->openers) > !!bdev ||
	    mddev->sysfs_active) {
		printk("md: %s still in use.\n",mdname(mddev));
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (bdev)
		/* It is possible IO was issued on some other
		 * open file which was closed before we took ->open_mutex.
		 * As that was not the last close __blkdev_put will not
		 * have called sync_blockdev, so we must.
		 */
		sync_blockdev(bdev);
L
Linus Torvalds 已提交
5355

N
NeilBrown 已提交
5356
	if (mddev->pers) {
5357 5358
		if (mddev->ro)
			set_disk_ro(disk, 0);
5359

5360
		__md_stop_writes(mddev);
5361 5362 5363
		md_stop(mddev);
		mddev->queue->merge_bvec_fn = NULL;
		mddev->queue->backing_dev_info.congested_fn = NULL;
N
NeilBrown 已提交
5364

5365
		/* tell userspace to handle 'inactive' */
N
NeilBrown 已提交
5366
		sysfs_notify_dirent_safe(mddev->sysfs_state);
5367

N
NeilBrown 已提交
5368
		rdev_for_each(rdev, mddev)
5369 5370
			if (rdev->raid_disk >= 0)
				sysfs_unlink_rdev(mddev, rdev);
5371

5372
		set_capacity(disk, 0);
N
NeilBrown 已提交
5373
		mutex_unlock(&mddev->open_mutex);
5374
		mddev->changed = 1;
5375
		revalidate_disk(disk);
5376

5377 5378
		if (mddev->ro)
			mddev->ro = 0;
N
NeilBrown 已提交
5379 5380
	} else
		mutex_unlock(&mddev->open_mutex);
L
Linus Torvalds 已提交
5381 5382 5383
	/*
	 * Free resources if final stop
	 */
5384
	if (mode == 0) {
L
Linus Torvalds 已提交
5385 5386
		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

5387
		bitmap_destroy(mddev);
5388 5389 5390 5391
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
5392
		}
5393
		mddev->bitmap_info.offset = 0;
5394

L
Linus Torvalds 已提交
5395 5396
		export_array(mddev);

N
NeilBrown 已提交
5397
		md_clean(mddev);
5398
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5399 5400
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;
5401
	}
M
Martin K. Petersen 已提交
5402
	blk_integrity_unregister(disk);
5403
	md_new_event(mddev);
N
NeilBrown 已提交
5404
	sysfs_notify_dirent_safe(mddev->sysfs_state);
N
NeilBrown 已提交
5405
	return 0;
L
Linus Torvalds 已提交
5406 5407
}

J
Jeff Garzik 已提交
5408
#ifndef MODULE
5409
static void autorun_array(struct mddev *mddev)
L
Linus Torvalds 已提交
5410
{
5411
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
5412 5413
	int err;

5414
	if (list_empty(&mddev->disks))
L
Linus Torvalds 已提交
5415 5416 5417 5418
		return;

	printk(KERN_INFO "md: running: ");

N
NeilBrown 已提交
5419
	rdev_for_each(rdev, mddev) {
L
Linus Torvalds 已提交
5420 5421 5422 5423 5424
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

5425
	err = do_md_run(mddev);
L
Linus Torvalds 已提交
5426 5427
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5428
		do_md_stop(mddev, 0, NULL);
L
Linus Torvalds 已提交
5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445
	}
}

/*
 * lets try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
5446
	struct md_rdev *rdev0, *rdev, *tmp;
5447
	struct mddev *mddev;
L
Linus Torvalds 已提交
5448 5449 5450 5451
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
5452
		int unit;
L
Linus Torvalds 已提交
5453
		dev_t dev;
5454
		LIST_HEAD(candidates);
L
Linus Torvalds 已提交
5455
		rdev0 = list_entry(pending_raid_disks.next,
5456
					 struct md_rdev, same_set);
L
Linus Torvalds 已提交
5457 5458 5459 5460

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
5461
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
L
Linus Torvalds 已提交
5462 5463 5464 5465 5466 5467 5468 5469 5470 5471
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
5472 5473 5474 5475 5476 5477 5478 5479 5480
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
L
Linus Torvalds 已提交
5481 5482 5483 5484 5485 5486 5487
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
N
Neil Brown 已提交
5488 5489 5490 5491
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
L
Linus Torvalds 已提交
5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev)) 
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING 
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
5506
			mddev->persistent = 1;
5507
			rdev_for_each_list(rdev, tmp, &candidates) {
L
Linus Torvalds 已提交
5508 5509 5510 5511 5512 5513 5514 5515 5516 5517
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
5518
		rdev_for_each_list(rdev, tmp, &candidates) {
5519
			list_del_init(&rdev->same_set);
L
Linus Torvalds 已提交
5520
			export_rdev(rdev);
5521
		}
L
Linus Torvalds 已提交
5522 5523 5524 5525
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
J
Jeff Garzik 已提交
5526
#endif /* !MODULE */
L
Linus Torvalds 已提交
5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541

static int get_version(void __user * arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}

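/*
 * GET_ARRAY_INFO ioctl: fill an mdu_array_info_t with the array
 * geometry and the per-state disk counts gathered under rcu_read_lock().
 */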
static int get_array_info(struct mddev * mddev, void __user * arg)
{
	mdu_array_info_t info;
5545
	int nr,working,insync,failed,spare;
5546
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
5547

5548 5549 5550
	nr = working = insync = failed = spare = 0;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
L
Linus Torvalds 已提交
5551
		nr++;
5552
		if (test_bit(Faulty, &rdev->flags))
L
Linus Torvalds 已提交
5553 5554 5555
			failed++;
		else {
			working++;
5556
			if (test_bit(In_sync, &rdev->flags))
5557
				insync++;	
L
Linus Torvalds 已提交
5558 5559 5560 5561
			else
				spare++;
		}
	}
5562
	rcu_read_unlock();
L
Linus Torvalds 已提交
5563 5564 5565 5566 5567 5568

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
A
Andre Noll 已提交
5569 5570
	info.size          = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
5571
		info.size = -1;
L
Linus Torvalds 已提交
5572 5573 5574 5575 5576 5577 5578 5579 5580
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
5581
	if (mddev->bitmap && mddev->bitmap_info.offset)
5582
		info.state = (1<<MD_SB_BITMAP_PRESENT);
5583
	info.active_disks  = insync;
L
Linus Torvalds 已提交
5584 5585 5586 5587 5588
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
5589
	info.chunk_size    = mddev->chunk_sectors << 9;
L
Linus Torvalds 已提交
5590 5591 5592 5593 5594 5595 5596

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

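/*
 * GET_BITMAP_FILE ioctl: copy the path of the external bitmap file to
 * userspace, or an empty string if the bitmap is internal or absent.
 */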
static int get_bitmap_file(struct mddev * mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

5603 5604 5605 5606
	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);
5607

5608 5609 5610 5611
	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
5612
	if (!mddev->bitmap || !mddev->bitmap->storage.file) {
5613 5614 5615 5616 5617 5618 5619 5620
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

5621 5622
	ptr = d_path(&mddev->bitmap->storage.file->f_path,
		     buf, sizeof(file->pathname));
C
Christoph Hellwig 已提交
5623
	if (IS_ERR(ptr))
5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}

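/*
 * GET_DISK_INFO ioctl: look up the rdev with the requested number and
 * report its device numbers, raid slot and state flags.
 */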
static int get_disk_info(struct mddev * mddev, void __user * arg)
{
	mdu_disk_info_t info;
5641
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
5642 5643 5644 5645

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

5646 5647
	rcu_read_lock();
	rdev = find_rdev_nr_rcu(mddev, info.number);
L
Linus Torvalds 已提交
5648 5649 5650 5651 5652
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
5653
		if (test_bit(Faulty, &rdev->flags))
L
Linus Torvalds 已提交
5654
			info.state |= (1<<MD_DISK_FAULTY);
5655
		else if (test_bit(In_sync, &rdev->flags)) {
L
Linus Torvalds 已提交
5656 5657 5658
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
5659 5660
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
L
Linus Torvalds 已提交
5661 5662 5663 5664 5665
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}
5666
	rcu_read_unlock();
L
Linus Torvalds 已提交
5667 5668 5669 5670 5671 5672 5673

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

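/*
 * ADD_NEW_DISK ioctl.  Used in three ways: to collect devices while
 * assembling an existing array, to hot-add a spare to a running array,
 * and to add devices when building a new array with a 0.90 superblock.
 */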
static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5677
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING 
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
5694 5695 5696
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
5697
			err = super_types[mddev->major_version]
L
Linus Torvalds 已提交
5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING 
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b), 
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING 
				"%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
5727 5728 5729 5730 5731
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
L
Linus Torvalds 已提交
5732 5733 5734 5735 5736 5737
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING 
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
5738
		/* set saved_raid_disk if appropriate */
5739 5740
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
5741
			    info->raid_disk < mddev->raid_disks) {
5742
				rdev->raid_disk = info->raid_disk;
5743 5744
				set_bit(In_sync, &rdev->flags);
			} else
5745 5746 5747 5748
				rdev->raid_disk = -1;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
5749
		if ((info->state & (1<<MD_DISK_SYNC)) &&
5750
		     rdev->raid_disk != info->raid_disk) {
5751 5752 5753 5754 5755 5756 5757
			/* This was a hot-add request, but events doesn't
			 * match, so reject it.
			 */
			export_rdev(rdev);
			return -EINVAL;
		}

5758 5759 5760 5761
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = rdev->raid_disk;
		else
			rdev->saved_raid_disk = -1;
5762

5763
		clear_bit(In_sync, &rdev->flags); /* just to be sure */
5764 5765
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
5766 5767
		else
			clear_bit(WriteMostly, &rdev->flags);
5768

L
Linus Torvalds 已提交
5769 5770
		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
L
Linus Torvalds 已提交
5782 5783
		if (err)
			export_rdev(rdev);
5784
		else
N
NeilBrown 已提交
5785
			sysfs_notify_dirent_safe(rdev->sysfs_state);
5786

5787
		md_update_sb(mddev, 1);
5788 5789
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5790
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5791 5792
		if (!err)
			md_new_event(mddev);
5793
		md_wakeup_thread(mddev->thread);
L
Linus Torvalds 已提交
5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
5808
		rdev = md_import_device(dev, -1, 0);
L
Linus Torvalds 已提交
5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING 
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
5822 5823
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);
L
Linus Torvalds 已提交
5824

5825 5826 5827
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

L
Linus Torvalds 已提交
5828 5829
		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
5830 5831
			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
		} else
5832
			rdev->sb_start = calc_dev_sboffset(rdev);
5833
		rdev->sectors = rdev->sb_start;
L
Linus Torvalds 已提交
5834

5835 5836 5837 5838 5839
		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
L
Linus Torvalds 已提交
5840 5841 5842 5843 5844
	}

	return 0;
}

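/*
 * HOT_REMOVE_DISK ioctl: detach a failed or spare device from the
 * array.  A device that still occupies an active raid slot is busy and
 * cannot be removed.
 */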
static int hot_remove_disk(struct mddev * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
5848
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
5849 5850 5851 5852 5853 5854 5855 5856 5857

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
5858
	md_update_sb(mddev, 1);
5859
	md_new_event(mddev);
L
Linus Torvalds 已提交
5860 5861 5862

	return 0;
busy:
5863
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
L
Linus Torvalds 已提交
5864 5865 5866 5867
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}

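/*
 * HOT_ADD_DISK ioctl: add a device as a spare to a running array with a
 * version-0 superblock and kick recovery so it can be rebuilt onto
 * immediately if needed.
 */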
static int hot_add_disk(struct mddev * mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
5872
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING 
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

5890
	rdev = md_import_device(dev, -1, 0);
L
Linus Torvalds 已提交
5891 5892 5893 5894 5895 5896 5897 5898
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING 
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
5899
		rdev->sb_start = calc_dev_sboffset(rdev);
L
Linus Torvalds 已提交
5900
	else
5901
		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
L
Linus Torvalds 已提交
5902

5903
	rdev->sectors = rdev->sb_start;
L
Linus Torvalds 已提交
5904

5905
	if (test_bit(Faulty, &rdev->flags)) {
L
Linus Torvalds 已提交
5906 5907 5908 5909 5910 5911
		printk(KERN_WARNING 
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
5912
	clear_bit(In_sync, &rdev->flags);
L
Linus Torvalds 已提交
5913
	rdev->desc_nr = -1;
5914
	rdev->saved_raid_disk = -1;
5915 5916 5917
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;
L
Linus Torvalds 已提交
5918 5919 5920 5921 5922 5923 5924 5925

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */

	rdev->raid_disk = -1;

5926
	md_update_sb(mddev, 1);
L
Linus Torvalds 已提交
5927 5928 5929 5930 5931 5932 5933

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
5934
	md_new_event(mddev);
L
Linus Torvalds 已提交
5935 5936 5937 5938 5939 5940 5941
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}

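/*
 * SET_BITMAP_FILE ioctl: attach (fd >= 0) or detach (fd < 0) an
 * external bitmap file.  The personality is quiesced around the bitmap
 * create/load or destroy.
 */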
static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err;

5946 5947 5948 5949 5950 5951 5952
	if (mddev->pers) {
		if (!mddev->pers->quiesce)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}
5953 5954


5955 5956 5957
	if (fd >= 0) {
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
5958
		mddev->bitmap_info.file = fget(fd);
5959

5960
		if (mddev->bitmap_info.file == NULL) {
5961 5962 5963 5964 5965
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

5966
		err = deny_bitmap_write_access(mddev->bitmap_info.file);
5967 5968 5969
		if (err) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
5970 5971
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
5972 5973
			return err;
		}
5974
		mddev->bitmap_info.offset = 0; /* file overrides offset */
5975 5976 5977 5978 5979
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
5980
		if (fd >= 0) {
5981
			err = bitmap_create(mddev);
5982 5983 5984
			if (!err)
				err = bitmap_load(mddev);
		}
5985
		if (fd < 0 || err) {
5986
			bitmap_destroy(mddev);
5987 5988
			fd = -1; /* make sure to put the file */
		}
5989
		mddev->pers->quiesce(mddev, 0);
5990 5991
	}
	if (fd < 0) {
5992 5993 5994
		if (mddev->bitmap_info.file) {
			restore_bitmap_write_access(mddev->bitmap_info.file);
			fput(mddev->bitmap_info.file);
5995
		}
5996
		mddev->bitmap_info.file = NULL;
5997 5998
	}

5999 6000 6001
	return err;
}

L
Linus Torvalds 已提交
6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014
/*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent,layout,chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
6015
static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
L
Linus Torvalds 已提交
6016 6017 6018 6019 6020
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
6021
		    info->major_version >= ARRAY_SIZE(super_types) ||
L
Linus Torvalds 已提交
6022 6023 6024 6025 6026 6027 6028 6029 6030 6031
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO 
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
6032
		mddev->persistent = !info->not_persistent;
6033 6034 6035 6036
		/* ensure mddev_put doesn't delete this now that there
		 * is some minimal configuration.
		 */
		mddev->ctime         = get_seconds();
L
Linus Torvalds 已提交
6037 6038 6039 6040 6041 6042 6043 6044
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
6045
	mddev->clevel[0]     = 0;
A
Andre Noll 已提交
6046
	mddev->dev_sectors   = 2 * (sector_t)info->size;
L
Linus Torvalds 已提交
6047 6048 6049 6050 6051 6052 6053 6054 6055
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
6056
	mddev->external	     = 0;
L
Linus Torvalds 已提交
6057 6058

	mddev->layout        = info->layout;
6059
	mddev->chunk_sectors = info->chunk_size >> 9;
L
Linus Torvalds 已提交
6060 6061 6062

	mddev->max_disks     = MD_SB_DISKS;

6063 6064
	if (mddev->persistent)
		mddev->flags         = 0;
6065
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
L
Linus Torvalds 已提交
6066

6067
	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6068
	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6069
	mddev->bitmap_info.offset = 0;
6070

6071 6072
	mddev->reshape_position = MaxSector;

L
Linus Torvalds 已提交
6073 6074 6075 6076 6077
	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

6078
	mddev->new_level = mddev->level;
6079
	mddev->new_chunk_sectors = mddev->chunk_sectors;
6080 6081
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;
6082
	mddev->reshape_backwards = 0;
6083

L
Linus Torvalds 已提交
6084 6085 6086
	return 0;
}

6087
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6088
{
D
Dan Williams 已提交
6089 6090 6091 6092 6093
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

6094 6095 6096 6097
	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);

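/*
 * Resize the used area of each member device of an active array;
 * called from update_array_info() when the requested size changes.
 * num_sectors == 0 means "use the largest size that fits on every
 * member device".
 */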
static int update_size(struct mddev *mddev, sector_t num_sectors)
{
6100
	struct md_rdev *rdev;
6101
	int rv;
6102
	int fit = (num_sectors == 0);
6103 6104 6105

	if (mddev->pers->resize == NULL)
		return -EINVAL;
6106 6107 6108 6109 6110
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
6111
	 * sb_start or, if that is <data_offset, it must fit before the size
6112 6113
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
6114 6115 6116
	 */
	if (mddev->sync_thread)
		return -EBUSY;
6117

N
NeilBrown 已提交
6118
	rdev_for_each(rdev, mddev) {
6119
		sector_t avail = rdev->sectors;
6120

6121 6122 6123
		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
6124 6125
			return -ENOSPC;
	}
6126
	rv = mddev->pers->resize(mddev, num_sectors);
6127 6128
	if (!rv)
		revalidate_disk(mddev->gendisk);
6129 6130 6131
	return rv;
}

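/*
 * Change the number of devices in an active array by starting a
 * reshape; the real work is done by the personality's check_reshape
 * method.
 */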
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
	int rv;
6135
	struct md_rdev *rdev;
6136
	/* change the number of raid disks */
6137
	if (mddev->pers->check_reshape == NULL)
6138 6139
		return -EINVAL;
	if (raid_disks <= 0 ||
6140
	    (mddev->max_disks && raid_disks >= mddev->max_disks))
6141
		return -EINVAL;
6142
	if (mddev->sync_thread || mddev->reshape_position != MaxSector)
6143
		return -EBUSY;
6144 6145 6146 6147 6148 6149 6150 6151 6152 6153

	rdev_for_each(rdev, mddev) {
		if (mddev->raid_disks < raid_disks &&
		    rdev->data_offset < rdev->new_data_offset)
			return -EINVAL;
		if (mddev->raid_disks > raid_disks &&
		    rdev->data_offset > rdev->new_data_offset)
			return -EINVAL;
	}

6154
	mddev->delta_disks = raid_disks - mddev->raid_disks;
6155 6156 6157 6158
	if (mddev->delta_disks < 0)
		mddev->reshape_backwards = 1;
	else if (mddev->delta_disks > 0)
		mddev->reshape_backwards = 0;
6159 6160

	rv = mddev->pers->check_reshape(mddev);
6161
	if (rv < 0) {
6162
		mddev->delta_disks = 0;
6163 6164
		mddev->reshape_backwards = 0;
	}
6165 6166 6167 6168
	return rv;
}


L
Linus Torvalds 已提交
6169 6170 6171 6172 6173 6174 6175 6176
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
6177
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
L
Linus Torvalds 已提交
6178 6179 6180
{
	int rv = 0;
	int cnt = 0;
6181 6182 6183
	int state = 0;

	/* calculate expected state,ignoring low bits */
6184
	if (mddev->bitmap && mddev->bitmap_info.offset)
6185
		state |= (1 << MD_SB_BITMAP_PRESENT);
L
Linus Torvalds 已提交
6186 6187 6188 6189 6190 6191 6192 6193

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/*	    mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
6194
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
6195 6196 6197
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
L
Linus Torvalds 已提交
6198 6199
		return -EINVAL;
	/* Check there is only one change */
A
Andre Noll 已提交
6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;
L
Linus Torvalds 已提交
6212 6213 6214 6215 6216 6217

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
6218
		if (mddev->pers->check_reshape == NULL)
L
Linus Torvalds 已提交
6219
			return -EINVAL;
6220 6221
		else {
			mddev->new_layout = info->layout;
6222
			rv = mddev->pers->check_reshape(mddev);
6223 6224 6225 6226
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
L
Linus Torvalds 已提交
6227
	}
A
Andre Noll 已提交
6228
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6229
		rv = update_size(mddev, (sector_t)info->size * 2);
6230

6231 6232 6233
	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

6234 6235 6236 6237 6238 6239 6240 6241 6242
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
6243
			if (mddev->bitmap_info.default_offset == 0)
6244
				return -EINVAL;
6245 6246
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
6247 6248
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
6249 6250
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
6251 6252
			if (!rv)
				rv = bitmap_load(mddev);
6253 6254 6255 6256 6257 6258 6259
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
6260
			if (mddev->bitmap->storage.file)
6261 6262 6263 6264
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
6265
			mddev->bitmap_info.offset = 0;
6266 6267
		}
	}
6268
	md_update_sb(mddev, 1);
L
Linus Torvalds 已提交
6269 6270 6271
	return rv;
}

6272
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
L
Linus Torvalds 已提交
6273
{
6274
	struct md_rdev *rdev;
6275
	int err = 0;
L
Linus Torvalds 已提交
6276 6277 6278 6279

	if (mddev->pers == NULL)
		return -ENODEV;

6280 6281
	rcu_read_lock();
	rdev = find_rdev_rcu(mddev, dev);
L
Linus Torvalds 已提交
6282
	if (!rdev)
6283 6284 6285 6286 6287 6288 6289 6290
		err =  -ENODEV;
	else {
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags))
			err = -EBUSY;
	}
	rcu_read_unlock();
	return err;
L
Linus Torvalds 已提交
6291 6292
}

6293 6294 6295 6296 6297 6298
/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
6299 6300
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
6301
	struct mddev *mddev = bdev->bd_disk->private_data;
6302 6303 6304

	geo->heads = 2;
	geo->sectors = 4;
6305
	geo->cylinders = mddev->array_sectors / 8;
6306 6307 6308
	return 0;
}

A
Al Viro 已提交
6309
static int md_ioctl(struct block_device *bdev, fmode_t mode,
L
Linus Torvalds 已提交
6310 6311 6312 6313
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
6314
	struct mddev *mddev = NULL;
6315
	int ro;
L
Linus Torvalds 已提交
6316

6317 6318 6319 6320 6321 6322 6323 6324 6325
	switch (cmd) {
	case RAID_VERSION:
	case GET_ARRAY_INFO:
	case GET_DISK_INFO:
		break;
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
	}
L
Linus Torvalds 已提交
6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd)
	{
		case RAID_VERSION:
			err = get_version(argp);
			goto done;

		case PRINT_RAID_DEBUG:
			err = 0;
			md_print_devices();
			goto done;

#ifndef MODULE
		case RAID_AUTORUN:
			err = 0;
			autostart_arrays(arg);
			goto done;
#endif
		default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

A
Al Viro 已提交
6355
	mddev = bdev->bd_disk->private_data;
L
Linus Torvalds 已提交
6356 6357 6358 6359 6360 6361

	if (!mddev) {
		BUG();
		goto abort;
	}

6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382
	/* Some actions do not require the mutex */
	switch (cmd) {
	case GET_ARRAY_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_array_info(mddev, argp);
		goto abort;

	case GET_DISK_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_disk_info(mddev, argp);
		goto abort;

	case SET_DISK_FAULTY:
		err = set_disk_faulty(mddev, new_decode_dev(arg));
		goto abort;
	}

L
Linus Torvalds 已提交
6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439
	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO 
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto abort;
	}

	switch (cmd)
	{
		case SET_ARRAY_INFO:
			{
				mdu_array_info_t info;
				if (!arg)
					memset(&info, 0, sizeof(info));
				else if (copy_from_user(&info, argp, sizeof(info))) {
					err = -EFAULT;
					goto abort_unlock;
				}
				if (mddev->pers) {
					err = update_array_info(mddev, &info);
					if (err) {
						printk(KERN_WARNING "md: couldn't update"
						       " array info. %d\n", err);
						goto abort_unlock;
					}
					goto done_unlock;
				}
				if (!list_empty(&mddev->disks)) {
					printk(KERN_WARNING
					       "md: array %s already has disks!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				if (mddev->raid_disks) {
					printk(KERN_WARNING
					       "md: array %s already initialised!\n",
					       mdname(mddev));
					err = -EBUSY;
					goto abort_unlock;
				}
				err = set_array_info(mddev, &info);
				if (err) {
					printk(KERN_WARNING "md: couldn't set"
					       " array info. %d\n", err);
					goto abort_unlock;
				}
			}
			goto done_unlock;

		default:;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
6440
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6441
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6442 6443 6444 6445
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
L
Linus Torvalds 已提交
6446 6447 6448 6449 6450 6451 6452 6453 6454
		err = -ENODEV;
		goto abort_unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd)
	{
6455
		case GET_BITMAP_FILE:
6456
			err = get_bitmap_file(mddev, argp);
6457 6458
			goto done_unlock;

L
Linus Torvalds 已提交
6459 6460 6461 6462 6463
		case RESTART_ARRAY_RW:
			err = restart_array(mddev);
			goto done_unlock;

		case STOP_ARRAY:
6464
			err = do_md_stop(mddev, 0, bdev);
L
Linus Torvalds 已提交
6465 6466 6467
			goto done_unlock;

		case STOP_ARRAY_RO:
6468
			err = md_set_readonly(mddev, bdev);
L
Linus Torvalds 已提交
6469 6470
			goto done_unlock;

6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498
		case BLKROSET:
			if (get_user(ro, (int __user *)(arg))) {
				err = -EFAULT;
				goto done_unlock;
			}
			err = -EINVAL;

			/* if the bdev is going readonly the value of mddev->ro
			 * does not matter, no writes are coming
			 */
			if (ro)
				goto done_unlock;

			/* are we are already prepared for writes? */
			if (mddev->ro != 1)
				goto done_unlock;

			/* transitioning to readauto need only happen for
			 * arrays that call md_write_start
			 */
			if (mddev->pers) {
				err = restart_array(mddev);
				if (err == 0) {
					mddev->ro = 2;
					set_disk_ro(mddev->gendisk, 0);
				}
			}
			goto done_unlock;
L
Linus Torvalds 已提交
6499 6500 6501 6502
	}

	/*
	 * The remaining ioctls are changing the state of the
6503 6504 6505 6506
	 * superblock, so we do not allow them on read-only arrays.
	 * However non-MD ioctls (e.g. get-size) will still come through
	 * here and hit the 'default' below, so only disallow
	 * 'md' ioctls, and switch to rw mode if started auto-readonly.
L
Linus Torvalds 已提交
6507
	 */
6508
	if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
6509 6510
		if (mddev->ro == 2) {
			mddev->ro = 0;
N
NeilBrown 已提交
6511
			sysfs_notify_dirent_safe(mddev->sysfs_state);
6512 6513
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
6514 6515 6516 6517
		} else {
			err = -EROFS;
			goto abort_unlock;
		}
L
Linus Torvalds 已提交
6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540
	}

	switch (cmd)
	{
		case ADD_NEW_DISK:
		{
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else
				err = add_new_disk(mddev, &info);
			goto done_unlock;
		}

		case HOT_REMOVE_DISK:
			err = hot_remove_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case HOT_ADD_DISK:
			err = hot_add_disk(mddev, new_decode_dev(arg));
			goto done_unlock;

		case RUN_ARRAY:
6541
			err = do_md_run(mddev);
L
Linus Torvalds 已提交
6542 6543
			goto done_unlock;

6544 6545 6546 6547
		case SET_BITMAP_FILE:
			err = set_bitmap_file(mddev, (int)arg);
			goto done_unlock;

L
Linus Torvalds 已提交
6548 6549 6550 6551 6552 6553 6554
		default:
			err = -EINVAL;
			goto abort_unlock;
	}

done_unlock:
abort_unlock:
6555 6556 6557
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
L
Linus Torvalds 已提交
6558 6559 6560 6561 6562 6563 6564 6565 6566
	mddev_unlock(mddev);

	return err;
done:
	if (err)
		MD_BUG();
abort:
	return err;
}
6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case HOT_REMOVE_DISK:
	case HOT_ADD_DISK:
	case SET_DISK_FAULTY:
	case SET_BITMAP_FILE:
		/* These take in integer arg, do not convert */
		break;
	default:
		arg = (unsigned long)compat_ptr(arg);
		break;
	}

	return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
L
Linus Torvalds 已提交
6586

A
Al Viro 已提交
6587
static int md_open(struct block_device *bdev, fmode_t mode)
L
Linus Torvalds 已提交
6588 6589 6590 6591 6592
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
6593
	struct mddev *mddev = mddev_find(bdev->bd_dev);
L
Linus Torvalds 已提交
6594 6595
	int err;

6596 6597 6598
	if (!mddev)
		return -ENODEV;

6599 6600 6601 6602 6603 6604
	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
T
Tejun Heo 已提交
6605
		flush_workqueue(md_misc_wq);
6606 6607 6608 6609 6610
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

N
NeilBrown 已提交
6611
	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
L
Linus Torvalds 已提交
6612 6613 6614
		goto out;

	err = 0;
6615
	atomic_inc(&mddev->openers);
N
NeilBrown 已提交
6616
	mutex_unlock(&mddev->open_mutex);
L
Linus Torvalds 已提交
6617

6618
	check_disk_change(bdev);
L
Linus Torvalds 已提交
6619 6620 6621 6622
 out:
	return err;
}

A
Al Viro 已提交
6623
static int md_release(struct gendisk *disk, fmode_t mode)
L
Linus Torvalds 已提交
6624
{
6625
 	struct mddev *mddev = disk->private_data;
L
Linus Torvalds 已提交
6626

E
Eric Sesterhenn 已提交
6627
	BUG_ON(!mddev);
6628
	atomic_dec(&mddev->openers);
L
Linus Torvalds 已提交
6629 6630 6631 6632
	mddev_put(mddev);

	return 0;
}
6633 6634 6635

static int md_media_changed(struct gendisk *disk)
{
6636
	struct mddev *mddev = disk->private_data;
6637 6638 6639 6640 6641 6642

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
6643
	struct mddev *mddev = disk->private_data;
6644 6645 6646 6647

	mddev->changed = 0;
	return 0;
}
6648
static const struct block_device_operations md_fops =
L
Linus Torvalds 已提交
6649 6650
{
	.owner		= THIS_MODULE,
A
Al Viro 已提交
6651 6652
	.open		= md_open,
	.release	= md_release,
N
NeilBrown 已提交
6653
	.ioctl		= md_ioctl,
6654 6655 6656
#ifdef CONFIG_COMPAT
	.compat_ioctl	= md_compat_ioctl,
#endif
6657
	.getgeo		= md_getgeo,
6658 6659
	.media_changed  = md_media_changed,
	.revalidate_disk= md_revalidate,
L
Linus Torvalds 已提交
6660 6661
};

A
Adrian Bunk 已提交
6662
static int md_thread(void * arg)
L
Linus Torvalds 已提交
6663
{
6664
	struct md_thread *thread = arg;
L
Linus Torvalds 已提交
6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

N
NeilBrown 已提交
6678
	allow_signal(SIGKILL);
6679
	while (!kthread_should_stop()) {
L
Linus Torvalds 已提交
6680

6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693
		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);
L
Linus Torvalds 已提交
6694

6695 6696
		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (!kthread_should_stop())
S
Shaohua Li 已提交
6697
			thread->run(thread);
L
Linus Torvalds 已提交
6698
	}
6699

L
Linus Torvalds 已提交
6700 6701 6702
	return 0;
}

6703
void md_wakeup_thread(struct md_thread *thread)
L
Linus Torvalds 已提交
6704 6705
{
	if (thread) {
6706
		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
L
Linus Torvalds 已提交
6707 6708 6709 6710 6711
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}

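/*
 * Create the per-array kernel thread that calls 'run' whenever
 * THREAD_WAKEUP is set; the thread is named "<mdname>_<name>".
 */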
struct md_thread *md_register_thread(void (*run) (struct md_thread *),
		struct mddev *mddev, const char *name)
{
6715
	struct md_thread *thread;
L
Linus Torvalds 已提交
6716

6717
	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
L
Linus Torvalds 已提交
6718 6719 6720 6721 6722 6723 6724
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
6725
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
6726 6727 6728
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
6729
				  name);
6730
	if (IS_ERR(thread->tsk)) {
L
Linus Torvalds 已提交
6731 6732 6733 6734 6735 6736
		kfree(thread);
		return NULL;
	}
	return thread;
}

6737
void md_unregister_thread(struct md_thread **threadp)
L
Linus Torvalds 已提交
6738
{
6739
	struct md_thread *thread = *threadp;
6740 6741
	if (!thread)
		return;
6742
	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6743 6744 6745 6746 6747 6748
	/* Locking ensures that mddev_unlock does not wake_up a
	 * non-existent thread
	 */
	spin_lock(&pers_lock);
	*threadp = NULL;
	spin_unlock(&pers_lock);
6749 6750

	kthread_stop(thread->tsk);
L
Linus Torvalds 已提交
6751 6752 6753
	kfree(thread);
}

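/*
 * Record a device failure: let the personality mark the device Faulty,
 * then schedule recovery and notify userspace via sysfs.
 */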
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!mddev) {
		MD_BUG();
		return;
	}

6761
	if (!rdev || test_bit(Faulty, &rdev->flags))
L
Linus Torvalds 已提交
6762
		return;
6763

6764
	if (!mddev->pers || !mddev->pers->error_handler)
L
Linus Torvalds 已提交
6765 6766
		return;
	mddev->pers->error_handler(mddev,rdev);
6767 6768
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
N
NeilBrown 已提交
6769
	sysfs_notify_dirent_safe(rdev->sysfs_state);
L
Linus Torvalds 已提交
6770 6771 6772
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
6773
	if (mddev->event_work.func)
T
Tejun Heo 已提交
6774
		queue_work(md_misc_wq, &mddev->event_work);
6775
	md_new_event_inintr(mddev);
L
Linus Torvalds 已提交
6776 6777 6778 6779 6780 6781 6782
}

/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
6783
	struct md_rdev *rdev;
L
Linus Torvalds 已提交
6784 6785 6786

	seq_printf(seq, "unused devices: ");

6787
	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
L
Linus Torvalds 已提交
6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}


6800
static void status_resync(struct seq_file *seq, struct mddev * mddev)
L
Linus Torvalds 已提交
6801
{
6802 6803 6804
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
6805 6806
	int scale;
	unsigned int per_milli;
L
Linus Torvalds 已提交
6807

6808
	resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
L
Linus Torvalds 已提交
6809

6810 6811
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6812
		max_sectors = mddev->resync_max_sectors;
L
Linus Torvalds 已提交
6813
	else
6814
		max_sectors = mddev->dev_sectors;
L
Linus Torvalds 已提交
6815 6816 6817 6818

	/*
	 * Should not happen.
	 */
6819
	if (!max_sectors) {
L
Linus Torvalds 已提交
6820 6821 6822
		MD_BUG();
		return;
	}
6823
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
6824
	 * in a sector_t, and (max_sectors>>scale) will fit in a
6825 6826 6827 6828 6829
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
6830
		while ( max_sectors/2 > (1ULL<<(scale+32)))
6831 6832 6833
			scale++;
	}
	res = (resync>>scale)*1000;
6834
	sector_div(res, (u32)((max_sectors>>scale)+1));
6835 6836

	per_milli = res;
L
Linus Torvalds 已提交
6837
	{
6838
		int i, x = per_milli/50, y = 20-x;
L
Linus Torvalds 已提交
6839 6840 6841 6842 6843 6844 6845 6846
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
6847
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6848 6849
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
6850 6851 6852 6853 6854
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
6855 6856
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);
L
Linus Torvalds 已提交
6857 6858 6859 6860 6861

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
6862 6863 6864 6865
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
L
Lucas De Marchi 已提交
6866
	 * We scale the divisor (db) by 32 to avoid losing precision
6867 6868 6869 6870
	 * near the end of resync when the number of remaining sectors
	 * is close to 'db'.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
L
Linus Torvalds 已提交
6871 6872 6873
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
6874 6875
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;
L
Linus Torvalds 已提交
6876

6877 6878 6879 6880 6881 6882 6883
	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);
L
Linus Torvalds 已提交
6884

6885
	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
L
Linus Torvalds 已提交
6886 6887 6888 6889 6890 6891
}

static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
6892
	struct mddev *mddev;
L
Linus Torvalds 已提交
6893 6894 6895 6896 6897 6898 6899 6900 6901 6902

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
6903
			mddev = list_entry(tmp, struct mddev, all_mddevs);
L
Linus Torvalds 已提交
6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
6917
	struct mddev *next_mddev, *mddev = v;
L
Linus Torvalds 已提交
6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928
	
	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
6929
		next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
L
Linus Torvalds 已提交
6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}		
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;

}

static void md_seq_stop(struct seq_file *seq, void *v)
{
6944
	struct mddev *mddev = v;
L
Linus Torvalds 已提交
6945 6946 6947 6948 6949 6950 6951

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

static int md_seq_show(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;
	sector_t sectors;
	struct md_rdev *rdev;

	if (v == (void*)1) {
		struct md_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		seq->poll_event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		rdev_for_each(rdev, mddev) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			}
			if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			if (test_bit(Replacement, &rdev->flags))
				seq_printf(seq, "(R)");
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
	 		seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		bitmap_status(seq, mddev->bitmap);

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);
	
	return 0;
}

static const struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int error;

	error = seq_open(file, &md_seq_ops);
	if (error)
		return error;

	seq = file->private_data;
	seq->poll_event = atomic_read(&md_event_count);
	return error;
}

static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *seq = filp->private_data;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (seq->poll_event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};

int register_md_personality(struct md_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct md_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}

static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev * rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}


/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
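/*
 * Typical usage: a personality calls md_write_start() before submitting
 * each write request and md_write_end() when that write completes, so
 * that writes_pending stays balanced and the array can return to the
 * 'clean' state once it goes idle.
 */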
void md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			set_bit(MD_CHANGE_PENDING, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}

void md_write_end(struct mddev *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
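/*
 * Sketch of the intended calling pattern (illustrative only; 'new_conf'
 * is an invented name):
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;
 *	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 */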
int md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc;
	struct blk_plug plug;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop())
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
		" %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
		window/2, (unsigned long long)max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO 
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}
	mddev->curr_resync_completed = j;

	blk_start_plug(&plug);
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed = j;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max && !kthread_should_stop()) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || kthread_should_stop());
		}

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						  currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only if either when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;
		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_finish_plug(&plug);
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp =
						mddev->curr_resync_completed;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rcu_read_lock();
			rdev_for_each_rcu(rdev, mddev)
				if (rdev->raid_disk >= 0 &&
				    mddev->delta_disks >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
			rcu_read_unlock();
		}
	}
 skip:
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);

static int remove_and_add_spares(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int spares = 0;
	int removed = 0;

	mddev->curr_resync_completed = 0;

	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev) == 0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->raid_disk = -1;
				removed++;
			}
		}
	if (removed)
		sysfs_notify(&mddev->kobj, NULL,
			     "degraded");

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk < 0
		    && !test_bit(Faulty, &rdev->flags)) {
			rdev->recovery_offset = 0;
			if (mddev->pers->
			    hot_add_disk(mddev, rdev) == 0) {
				if (sysfs_link_rdev(mddev, rdev))
					/* failure here is OK */;
				spares++;
				md_new_event(mddev);
				set_bit(MD_CHANGE_DEVS, &mddev->flags);
			}
		}
	}
	return spares;
}
static void reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev))
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);

	/* If the array is no longer degraded, then any saved_raid_disk
	 * information must be scrapped.  Also if any device is now
	 * In_sync we must scrap the saved_raid_disk for that device
	 * so that the superblock for an incrementally recovered device
	 * gets written out.
	 */
	rdev_for_each(rdev, mddev)
		if (!mddev->degraded ||
		    test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			struct md_rdev *rdev;
			rdev_for_each(rdev, mddev)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Blocked, &rdev->flags) &&
				    test_bit(Faulty, &rdev->flags) &&
				    atomic_read(&rdev->nr_pending)==0) {
					if (mddev->pers->hot_remove_disk(
						    mddev, rdev) == 0) {
						sysfs_unlink_rdev(mddev, rdev);
						rdev->raid_disk = -1;
					}
				}
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;
		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n", 
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
				clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
				clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
				clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
				clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent_safe(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

void md_finish_reshape(struct mddev *mddev)
{
	/* called by personality module when reshape completes. */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management.
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  A 'shift' can be set so that larger blocks are tracked and
 *  consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
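/*
 * Layout sketch of one 64-bit entry, matching the description above
 * (the BB_* helpers used below are defined in md.h):
 *
 *   bit  63    - 'acknowledged' flag (BB_ACK)
 *   bits 62..9 - start sector of the range (BB_OFFSET)
 *   bits  8..0 - length of the range minus one (BB_LEN adds the one
 *                back), so the field runs 0..511 for lengths 1..512
 */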
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo = 0;
	u64 *p = bb->page;
	int rv = 0;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);

	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);

/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
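/*
 * Worked example (illustrative numbers only): if the table already holds
 * an acknowledged range covering sectors 100-107 and we are asked to add
 * an unacknowledged range for sectors 104-123, the two are merged into a
 * single unacknowledged entry covering sectors 100-123.
 */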
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;
		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);
		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);
		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);
		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi' */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irq(&bb->lock);

	return rv;
}

int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = 0;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now low doesn't overlap, so.. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
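/*
 * For example (illustrative only), writing the string "2000 8\n" to the
 * 'bad-blocks' file records an acknowledged 8-sector bad range starting
 * at sector 2000, while the same write to 'unacknowledged-bad-blocks'
 * records it without the acknowledged flag.
 */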

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
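		/* fall through */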
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;
	i_scanned = 0;
	i_passed = 0;
	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
						i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}
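
/*
 * These parameters are also exposed through sysfs, typically under
 * /sys/module/md_mod/parameters/ (e.g. 'start_ro' and
 * 'start_dirty_degraded').
 */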

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);