/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{  }
};
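/* The table nesting above creates /proc/sys/dev/raid/speed_limit_min
 * and /proc/sys/dev/raid/speed_limit_max, the paths referred to in the
 * comment above speed_min()/speed_max().
 */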

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop still owns
 * a reference to the current mddev and must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)

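/*
 * Illustrative use of for_each_mddev() (a sketch, not code from this
 * file): the macro handles the lock/refcount hand-off itself, so a
 * caller can simply write
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("%s\n", mdname(mddev));
 *
 * Breaking out of the loop early leaves a reference held on the
 * current mddev, which the caller must drop with mddev_put().
 */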
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
		return;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	mddev->pers->make_request(mddev, bio);

	generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

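/* Typical pairing (an illustrative sketch, not a call site from this
 * file): a personality quiesces the array around a reconfiguration:
 *
 *	mddev_suspend(mddev);
 *	...modify internal state while no IO is in flight...
 *	mddev_resume(mddev);
 */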
int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

static int md_mergeable_bvec(struct request_queue *q,
			     struct bvec_merge_data *bvm,
			     struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	int ret;
	rcu_read_lock();
	if (mddev->suspended) {
		/* Must always allow one vec */
		if (bvm->bi_size == 0)
			ret = biovec->bv_len;
		else
			ret = 0;
	} else {
		struct md_personality *pers = mddev->pers;
		if (pers && pers->mergeable_bvec)
			ret = pers->mergeable_bvec(mddev, bvm, biovec);
		else
			ret = biovec->bv_len;
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Generic flush handling for md
 */
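/*
 * Sketch of the sequence implemented below: md_flush_request() claims
 * mddev->flush_bio and queues submit_flushes(), which sends an empty
 * WRITE_FLUSH bio to every active rdev; each completion runs
 * md_end_flush(), and when the last one finishes md_submit_flush_data()
 * resubmits the original bio with REQ_FLUSH cleared.
 */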

static void md_end_flush(struct bio *bio, int err)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_iter.bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		mddev->pers->make_request(mddev, bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);

void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct mddev *mddev = cb->data;
	md_wakeup_thread(mddev->thread);
	kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

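	/* A unit with a major other than MD_MAJOR is an mdp (partitionable)
	 * array; its minor carries the partition number in the low
	 * MdpMinorShift bits, which are masked off below so that every
	 * partition resolves to the same array.
	 */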
	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;

static void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}

static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

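/*
 * Worked example (assuming MD_RESERVED_SECTORS is 128, i.e. 64K):
 * MD_NEW_SIZE_SECTORS() rounds the device size down to a 64K multiple
 * and backs off one 64K block, so a 1000448-sector device (already a
 * 64K multiple) places its 0.90 superblock at sector 1000320.
 */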
static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	kfree(rdev->badblocks.page);
	rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}

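/* Typical pairing, as used by super_90_rdev_size_change() below:
 *
 *	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(rdev->mddev);
 */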
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	int ret;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	submit_bio_wait(rw, bio);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

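/*
 * e.g. md_csum_fold(0x12345678): 0x5678 + 0x1234 = 0x68ac, and the
 * second fold leaves 0x68ac - a 16-bit result regardless of how the
 * 32-bit sum was accumulated.
 */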
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev,
					  struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
	int		    (*allow_new_offset)(struct md_rdev *rdev,
						unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

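/* Illustrative call site (a sketch of how a personality without bitmap
 * support would use this from its ->run method):
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 */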
/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

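	/* On-disk bad-block log entries (decoded below) are __le64 values:
	 * the low 10 bits hold the length and the high 54 bits the start
	 * sector, both scaled by bblog_shift when applied; an all-ones
	 * entry terminates the log.
	 */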
	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}

1557
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
L
Linus Torvalds 已提交
1558
{
1559
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1560
	__u64 ev1 = le64_to_cpu(sb->events);
L
Linus Torvalds 已提交
1561

1562
	rdev->raid_disk = -1;
1563 1564
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
1565
	clear_bit(Bitmap_sync, &rdev->flags);
1566 1567
	clear_bit(WriteMostly, &rdev->flags);

L
Linus Torvalds 已提交
1568 1569 1570
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
1571
		mddev->external = 0;
1572
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
L
Linus Torvalds 已提交
1573 1574 1575
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
1576
		mddev->clevel[0] = 0;
L
Linus Torvalds 已提交
1577 1578
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
A
Andre Noll 已提交
1579
		mddev->dev_sectors = le64_to_cpu(sb->size);
1580
		mddev->events = ev1;
1581
		mddev->bitmap_info.offset = 0;
1582 1583 1584 1585
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
1586
		mddev->bitmap_info.default_offset = 1024 >> 9;
1587
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
1588 1589
		mddev->reshape_backwards = 0;

L
Linus Torvalds 已提交
1590 1591 1592 1593
		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;
1594

1595
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1596
		    mddev->bitmap_info.file == NULL) {
1597 1598
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}
1613

1614 1615 1616 1617 1618
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
1619
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1620 1621 1622 1623 1624
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
1625 1626 1627 1628 1629
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
1630
			mddev->new_chunk_sectors = mddev->chunk_sectors;
1631 1632
		}

1633
	} else if (mddev->pers == NULL) {
1634 1635
		/* Insist of good event counter while assembling, except for
		 * spares (which don't need an event count) */
L
Linus Torvalds 已提交
1636
		++ev1;
1637 1638 1639 1640 1641
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
1642 1643 1644 1645 1646 1647
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
1648 1649
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
1650 1651 1652 1653 1654
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
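
/*
 * Illustrative sketch (not from the driver): decoding a v1.x
 * dev_roles[] entry the way super_1_validate() does above.  The
 * helper name is hypothetical.
 *
 *	static int role_is_active(__le16 role_le)
 *	{
 *		u16 role = le16_to_cpu(role_le);
 *
 *		if (role == 0xffff)	// spare
 *			return 0;
 *		if (role == 0xfffe)	// faulty
 *			return 0;
 *		return 1;		// active data device at slot 'role'
 *	}
 */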

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks */ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
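
/*
 * Illustrative note (an assumption about the on-disk layout, based on
 * the encode loop above): each bad-blocks entry is stored as a
 * little-endian u64 with the sector offset above bit 10 and the
 * length field in the low bits, so a hypothetical decoder would be:
 *
 *	u64 bb = le64_to_cpu(bbp[i]);
 *	sector_t offset = bb >> 10;	// first bad sector
 *	int len = bb & 0x3ff;		// length field below bit 10
 */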

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}
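
/*
 * Worked example (illustrative): for minor version 0 the superblock
 * sits near the end of the device, 8*2 == 16 sectors before the end
 * and rounded down to an 8-sector (4K) boundary, as computed above.
 * On a hypothetical 1000003-sector device:
 *
 *	sb_start = (1000003 - 16) & ~7ULL;	// == 999984, 4K aligned
 */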

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}
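
/*
 * Worked example (illustrative): (32+4)*2 above is 72 sectors, i.e.
 * 36K of superblock plus maximum bad-block log, so with
 * sb_start == 8 any new_offset below 8 + 72 == 80 sectors would be
 * refused.
 */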

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
		.allow_new_offset   = super_1_allow_new_offset,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev;
	struct blk_integrity *bi_mddev;

	if (!mddev->gendisk)
		return;

	bi_rdev = bdev_get_integrity(rdev->bdev);
	bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);

static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	rcu_read_lock();
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers)
			choice = mddev->raid_disks;
		while (find_rdev_nr_rcu(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
			rcu_read_unlock();
			return -EBUSY;
		}
	}
	rcu_read_unlock();
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
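
/*
 * Example (illustrative): kobject names cannot contain '/', so the
 * '/' -> '!' rewrite above turns e.g.
 *
 *	"cciss/c0d0p1"  into  "cciss!c0d0p1"
 *
 * before it is used for the "dev-%s" sysfs directory.
 */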

static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	rdev->badblocks.count = 0;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	md_rdev_clear(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(struct md_rdev *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev;

	while (!list_empty(&mddev->disks)) {
		rdev = list_first_entry(&mddev->disks, struct md_rdev,
					same_set);
		kick_rdev_from_array(rdev);
	}
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void sync_sbs(struct mddev *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;
	rdev_for_each(rdev, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}

static void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;

	if (mddev->ro) {
		if (force_change)
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		return;
	}
repeat:
	/* First make sure individual recovery_offsets are correct */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
				rdev->recovery_offset = mddev->curr_resync_completed;

	}
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external) {
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			rdev_for_each(rdev, mddev) {
				if (rdev->badblocks.changed) {
					rdev->badblocks.changed = 0;
					md_ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock(&mddev->lock);

	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		mddev->can_decrease_events = nospares;
	}

	/*
	 * This 64-bit counter should never wrap.
	 * Either we are in around ~1 trillion A.C., assuming
	 * 1 reboot per second, or we have a bug...
	 */
	WARN_ON(mddev->events == 0);

	rdev_for_each(rdev, mddev) {
		if (rdev->badblocks.changed)
			any_badblocks_changed++;
		if (test_bit(Faulty, &rdev->flags))
			set_bit(FaultRecorded, &rdev->flags);
	}

	sync_sbs(mddev, nospares);
	spin_unlock(&mddev->lock);

	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
		 mdname(mddev), mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];

		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */

		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			pr_debug("md: (write) %s's sb offset: %llu\n",
				 bdevname(rdev->bdev, b),
				 (unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;
			if (rdev->badblocks.size) {
				md_super_write(mddev, rdev,
					       rdev->badblocks.sector,
					       rdev->badblocks.size << 9,
					       rdev->bb_page);
				rdev->badblocks.size = 0;
			}

		} else
			pr_debug("md: %s (skipping faulty)\n",
				 bdevname(rdev->bdev, b));

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock(&mddev->lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock(&mddev->lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock(&mddev->lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	rdev_for_each(rdev, mddev) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			md_ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}
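
/*
 * Illustrative walk-through (not driver code): a pure clean<->dirty
 * transition can be undone by stepping the event count back instead
 * of waking spares, e.g.
 *
 *	events 42 (clean) -> 43 (dirty) -> 42 (clean again)
 *
 * which is why can_decrease_events is only set while nospares held
 * above.
 */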

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
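
/*
 * Examples (illustrative): cmd_match() ignores one trailing newline,
 * as sysfs writes usually carry one:
 *
 *	cmd_match("faulty",   "faulty")	== 1
 *	cmd_match("faulty\n", "faulty")	== 1
 *	cmd_match("faulty2",  "faulty")	== 0
 */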

struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct md_rdev *, char *);
	ssize_t (*store)(struct md_rdev *, const char *, size_t);
};

static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags) ||
	    rdev->badblocks.unacked_exist) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags) ||
	    (rdev->badblocks.unacked_exist
	     && !test_bit(Faulty, &rdev->flags))) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	if (test_bit(WriteErrorSeen, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_error", sep);
		sep = ",";
	}
	if (test_bit(WantReplacement, &rdev->flags)) {
		len += sprintf(page+len, "%swant_replacement", sep);
		sep = ",";
	}
	if (test_bit(Replacement, &rdev->flags)) {
		len += sprintf(page+len, "%sreplacement", sep);
		sep = ",";
	}

	return len+sprintf(page+len, "\n");
}

static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flags
	 *  -blocked - clears the Blocked and possibly simulates an error
	 *  insync - sets Insync providing device isn't active
	 *  -insync - clear Insync for a device with a slot assigned,
	 *            so that it gets rebuilt based on bitmap
	 *  write_error - sets WriteErrorSeen
	 *  -write_error - clears WriteErrorSeen
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			err = 0;
		else
			err = -EBUSY;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			struct mddev *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		if (!test_bit(Faulty, &rdev->flags) &&
		    rdev->badblocks.unacked_exist) {
			/* metadata handler doesn't understand badblocks,
			 * so we need to fail the device
			 */
			md_error(rdev->mddev, rdev);
		}
		clear_bit(Blocked, &rdev->flags);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
		if (rdev->mddev->pers == NULL) {
			clear_bit(In_sync, &rdev->flags);
			rdev->saved_raid_disk = rdev->raid_disk;
			rdev->raid_disk = -1;
			err = 0;
		}
	} else if (cmd_match(buf, "write_error")) {
		set_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-write_error")) {
		clear_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "want_replacement")) {
		/* Any non-spare device that is not a replacement can
		 * become want_replacement at any time, but we then need to
		 * check if recovery is needed.
		 */
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Replacement, &rdev->flags))
			set_bit(WantReplacement, &rdev->flags);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
		err = 0;
	} else if (cmd_match(buf, "-want_replacement")) {
		/* Clearing 'want_replacement' is always allowed.
		 * Once replacement starts it is too late though.
		 */
		err = 0;
		clear_bit(WantReplacement, &rdev->flags);
	} else if (cmd_match(buf, "replacement")) {
		/* Can only set a device as a replacement when array has not
		 * yet been started.  Once running, replacement is automatic
		 * from spares, or by assigning 'slot'.
		 */
		if (rdev->mddev->pers)
			err = -EBUSY;
		else {
			set_bit(Replacement, &rdev->flags);
			err = 0;
		}
	} else if (cmd_match(buf, "-replacement")) {
		/* Similarly, can only clear Replacement before start */
		if (rdev->mddev->pers)
			err = -EBUSY;
		else {
			clear_bit(Replacement, &rdev->flags);
			err = 0;
		}
	}
	if (!err)
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);

static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);

static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_remove_disk == NULL)
			return -EINVAL;
		clear_bit(Blocked, &rdev->flags);
		remove_and_add_spares(rdev->mddev, rdev);
		if (rdev->raid_disk >= 0)
			return -EBUSY;
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		clear_bit(In_sync, &rdev->flags);
		clear_bit(Bitmap_sync, &rdev->flags);
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		if (sysfs_link_rdev(rdev->mddev, rdev))
			/* failure here is OK */;
		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);

static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long offset;
	if (kstrtoull(buf, 10, &offset) < 0)
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	rdev->new_data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);

static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)rdev->new_data_offset);
}

static ssize_t new_offset_store(struct md_rdev *rdev,
				const char *buf, size_t len)
{
	unsigned long long new_offset;
	struct mddev *mddev = rdev->mddev;

	if (kstrtoull(buf, 10, &new_offset) < 0)
		return -EINVAL;

	if (mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
		return -EBUSY;
	if (new_offset == rdev->data_offset)
		/* reset is always permitted */
		;
	else if (new_offset > rdev->data_offset) {
		/* must not push array size beyond rdev_sectors */
		if (new_offset - rdev->data_offset
		    + mddev->dev_sectors > rdev->sectors)
				return -E2BIG;
	}
	/* Metadata worries about other space details. */

	/* decreasing the offset is inconsistent with a backwards
	 * reshape.
	 */
	if (new_offset < rdev->data_offset &&
	    mddev->reshape_backwards)
		return -EINVAL;
	/* Increasing offset is inconsistent with forwards
	 * reshape.  reshape_direction should be set to
	 * 'backwards' first.
	 */
	if (new_offset > rdev->data_offset &&
	    !mddev->reshape_backwards)
		return -EINVAL;

	if (mddev->pers && mddev->persistent &&
	    !super_types[mddev->major_version]
	    .allow_new_offset(rdev, new_offset))
		return -E2BIG;
	rdev->new_data_offset = new_offset;
	if (new_offset > rdev->data_offset)
		mddev->reshape_backwards = 1;
	else if (new_offset < rdev->data_offset)
		mddev->reshape_backwards = 0;

	return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
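
/*
 * Examples (illustrative) for overlaps(), where each range is a
 * start sector plus a length:
 *
 *	overlaps(0, 100, 100, 50) == 0	// ranges only touch
 *	overlaps(0, 100,  99, 50) == 1	// one sector shared
 */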

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (kstrtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}
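
/*
 * Example (illustrative): sysfs sizes are in 1K blocks, so
 *
 *	strict_blocks_to_sectors("1024", &s)	// s == 2048 (1 MiB)
 *
 * and any value whose doubling would not fit is rejected with
 * -EINVAL.
 */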

static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	struct mddev *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (rdev->data_offset != rdev->new_data_offset)
		return -EINVAL; /* too confusing */
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
				rdev->data_offset;
		if (!my_mddev->pers->resize)
			/* Cannot change size for RAID0 or Linear etc */
			return -EINVAL;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* Need to check that all other rdevs with the same
		 * ->bdev do not overlap.  'rcu' is sufficient to walk
		 * the rdev lists safely.
		 * This check does not provide a hard guarantee, it
		 * just helps avoid dangerous mistakes.
		 */
		struct mddev *mddev;
		int overlap = 0;
		struct list_head *tmp;

		rcu_read_lock();
		for_each_mddev(mddev, tmp) {
			struct md_rdev *rdev2;

			rdev_for_each(rdev2, mddev)
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
					overlap = 1;
					break;
				}
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		rcu_read_unlock();
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);

static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
	unsigned long long recovery_start = rdev->recovery_offset;

	if (test_bit(In_sync, &rdev->flags) ||
	    recovery_start == MaxSector)
		return sprintf(page, "none\n");

	return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long recovery_start;

	if (cmd_match(buf, "none"))
		recovery_start = MaxSector;
	else if (kstrtoull(buf, 10, &recovery_start))
		return -EINVAL;

	if (rdev->mddev->pers &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	rdev->recovery_offset = recovery_start;
	if (recovery_start == MaxSector)
		set_bit(In_sync, &rdev->flags);
	else
		clear_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);

static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);

static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_new_offset.attr,
	&rdev_size.attr,
	&rdev_recovery_start.attr,
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
	struct mddev *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
	ssize_t rv;
	struct mddev *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
	kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};

int md_rdev_init(struct md_rdev *rdev)
{
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_events = 0;
	rdev->last_read_error.tv_sec  = 0;
	rdev->last_read_error.tv_nsec = 0;
	rdev->sb_loaded = 0;
	rdev->bb_page = NULL;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	/* Add space to store bad block list.
	 * This reserves the space even on arrays where it cannot
	 * be used - I wonder if that matters
	 */
	rdev->badblocks.count = 0;
	rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
	rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	seqlock_init(&rdev->badblocks.lock);
	if (rdev->badblocks.page == NULL)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	err = md_rdev_init(rdev);
	if (err)
		goto abort_free;
	err = alloc_disk_sb(rdev);
	if (err)
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
			       "superblock, not importing!\n",
				bdevname(rdev->bdev,b),
			       super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	md_rdev_clear(rdev);
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(struct mddev *mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each_safe(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk( KERN_ERR \
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each_safe(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}
}

/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale',
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {
			unsigned int value;
			value = *cp - '0';
			result = result * 10 + value;
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;
	if (decimals < 0)
		decimals = 0;
	while (decimals < scale) {
		result *= 10;
		decimals ++;
	}
	*res = result;
	return 0;
}
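
/*
 * Examples (illustrative): with scale == 3 the result is in
 * thousandths, so
 *
 *	strict_strtoul_scaled("1.25", &res, 3)	// res == 1250
 *	strict_strtoul_scaled("0.2",  &res, 3)	// res == 200
 *	strict_strtoul_scaled("5",    &res, 3)	// res == 5000
 */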

static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
	unsigned long msec;

	if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
		return -EINVAL;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		unsigned long old_delay = mddev->safemode_delay;
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
		if (mddev->safemode_delay < old_delay || old_delay == 0)
			md_safemode_timeout((unsigned long)mddev);
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
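
/*
 * Worked example (illustrative): safemode_delay is kept in jiffies,
 * so writing "0.200" to safe_mode_delay with HZ == 250 gives
 *
 *	msec = 200;				// from strict_strtoul_scaled()
 *	safemode_delay = (200 * 250) / 1000;	// == 50 jiffies
 */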

static ssize_t
level_show(struct mddev *mddev, char *page)
{
	struct md_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
	char clevel[16];
	ssize_t rv = len;
	struct md_personality *pers;
	long level;
	void *priv;
	struct md_rdev *rdev;

	if (mddev->pers == NULL) {
		if (len == 0)
			return 0;
		if (len >= sizeof(mddev->clevel))
			return -ENOSPC;
		strncpy(mddev->clevel, buf, len);
		if (mddev->clevel[len-1] == '\n')
			len--;
		mddev->clevel[len] = 0;
		mddev->level = LEVEL_NONE;
		return rv;
	}
	if (mddev->ro)
		return -EROFS;

	/* request to change the personality.  Need to ensure:
	 *  - array is not engaged in resync/recovery/reshape
	 *  - old personality can be suspended
	 *  - new personality will access other array.
	 */

	if (mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->reshape_position != MaxSector ||
	    mddev->sysfs_active)
		return -EBUSY;

	if (!mddev->pers->quiesce) {
		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
		       mdname(mddev), mddev->pers->name);
		return -EINVAL;
	}

	/* Now find the new personality */
	if (len == 0 || len >= sizeof(clevel))
		return -EINVAL;
	strncpy(clevel, buf, len);
	if (clevel[len-1] == '\n')
		len--;
	clevel[len] = 0;
	if (kstrtol(clevel, 10, &level))
		level = LEVEL_NONE;

	if (request_module("md-%s", clevel) != 0)
		request_module("md-level-%s", clevel);
	spin_lock(&pers_lock);
	pers = find_pers(level, clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);

	if (pers == mddev->pers) {
		/* Nothing to do! */
		module_put(pers->owner);
		return rv;
	}
	if (!pers->takeover) {
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
		       mdname(mddev), clevel);
		return -EINVAL;
	}

N
NeilBrown 已提交
3353
	rdev_for_each(rdev, mddev)
3354 3355
		rdev->new_raid_disk = rdev->raid_disk;

	/* ->takeover must set new_* and/or delta_disks
	 * if it succeeds, and may set them when it fails.
	 */
	priv = pers->takeover(mddev);
	if (IS_ERR(priv)) {
		mddev->new_level = mddev->level;
		mddev->new_layout = mddev->layout;
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->raid_disks -= mddev->delta_disks;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
		module_put(pers->owner);
		printk(KERN_WARNING "md: %s: %s would not accept array\n",
		       mdname(mddev), clevel);
		return PTR_ERR(priv);
	}

	/* Looks like we have a winner */
	mddev_suspend(mddev);
	mddev->pers->stop(mddev);

	if (mddev->pers->sync_request == NULL &&
	    pers->sync_request != NULL) {
		/* need to add the md_redundancy_group */
		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
	}
	if (mddev->pers->sync_request != NULL &&
	    pers->sync_request == NULL) {
		/* need to remove the md_redundancy_group */
		if (mddev->to_remove == NULL)
			mddev->to_remove = &md_redundancy_group;
	}

	if (mddev->pers->sync_request == NULL &&
	    mddev->external) {
		/* We are converting from a no-redundancy array
		 * to a redundancy array and metadata is managed
		 * externally so we need to be sure that writes
		 * won't block due to a need to transition
		 *      clean->dirty
		 * until external management is started.
		 */
		mddev->in_sync = 0;
		mddev->safemode_delay = 0;
		mddev->safemode = 0;
	}

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk >= mddev->raid_disks)
			rdev->new_raid_disk = -1;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		sysfs_unlink_rdev(mddev, rdev);
	}
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0)
			continue;
		if (rdev->new_raid_disk == rdev->raid_disk)
			continue;
		rdev->raid_disk = rdev->new_raid_disk;
		if (rdev->raid_disk < 0)
			clear_bit(In_sync, &rdev->flags);
		else {
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING "md: cannot register rd%d"
				       " for %s after level change\n",
				       rdev->raid_disk, mdname(mddev));
		}
	}

	module_put(mddev->pers->owner);
	mddev->pers = pers;
	mddev->private = priv;
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->degraded = 0;
	if (mddev->pers->sync_request == NULL) {
		/* this is now an array without redundancy, so
		 * it must always be in_sync
		 */
		mddev->in_sync = 1;
		del_timer_sync(&mddev->safemode_timer);
	}
	blk_set_stacking_limits(&mddev->queue->limits);
	pers->run(mddev);
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	mddev_resume(mddev);
	if (!mddev->thread)
		md_update_sb(mddev, 1);
	sysfs_notify(&mddev->kobj, NULL, "level");
	md_new_event(mddev);
	return rv;
}
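
/*
 * Example (a sketch, not from the original source): assuming md0 is a
 * running RAID5 array and the target personality module is available,
 * an administrator can request an online takeover with
 *
 *	echo raid6 > /sys/block/md0/md/level
 *
 * The write is rejected unless the current personality can be quiesced
 * and the new one implements ->takeover.
 */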

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);

static ssize_t
layout_show(struct mddev *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		if (mddev->ro)
			return -EROFS;
		mddev->new_layout = n;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_layout = mddev->layout;
			return err;
		}
	} else {
		mddev->new_layout = n;
		if (mddev->reshape_position == MaxSector)
			mddev->layout = n;
	}
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);

static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(struct mddev *mddev, int raid_disks);

static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		struct md_rdev *rdev;
		int olddisks = mddev->raid_disks - mddev->delta_disks;

		rdev_for_each(rdev, mddev) {
			if (olddisks < n &&
			    rdev->data_offset < rdev->new_data_offset)
				return -EINVAL;
			if (olddisks > n &&
			    rdev->data_offset > rdev->new_data_offset)
				return -EINVAL;
		}
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
		mddev->reshape_backwards = (mddev->delta_disks < 0);
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);

static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_sectors != mddev->new_chunk_sectors)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_chunk_sectors << 9,
			       mddev->chunk_sectors << 9);
	return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers) {
		int err;
		if (mddev->pers->check_reshape == NULL)
			return -EBUSY;
		if (mddev->ro)
			return -EROFS;
		mddev->new_chunk_sectors = n >> 9;
		err = mddev->pers->check_reshape(mddev);
		if (err) {
			mddev->new_chunk_sectors = mddev->chunk_sectors;
			return err;
		}
	} else {
		mddev->new_chunk_sectors = n >> 9;
		if (mddev->reshape_position == MaxSector)
			mddev->chunk_sectors = n >> 9;
	}
	return len;
}
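
/*
 * Note (sketch): chunk_size is exposed in bytes while md works in
 * 512-byte sectors internally; the "n >> 9" above silently rounds any
 * value that is not a multiple of 512 down to a whole sector count.
 */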
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);

static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return -EBUSY;
	if (cmd_match(buf, "none"))
		n = MaxSector;
	else if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	if (mddev->pers)
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);

/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };

static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);

static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		err = do_md_stop(mddev, 0, NULL);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers)
			err = do_md_stop(mddev, 2, NULL);
		else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = md_set_readonly(mddev, NULL);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = md_set_readonly(mddev, NULL);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock(&mddev->lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock(&mddev->lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		if (mddev->hold_active == UNTIL_IOCTL)
			mddev->hold_active = 0;
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);

static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n",
		       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&mddev->max_corr_read_errors, n);
		return len;
	}
	return -EINVAL;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
	max_corrected_read_errors_store);

static ssize_t
null_show(struct mddev *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}
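
/*
 * Example (sketch): hot-add a component device by major:minor number,
 * e.g. for /dev/sdb1 (major 8, minor 17):
 *
 *	echo 8:17 > /sys/block/md0/md/new_dev
 */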

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);

static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
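
/*
 * Example (sketch): force bitmap chunks 0 through 100 dirty so the
 * corresponding regions are rewritten on the next resync:
 *
 *	echo 0-100 > /sys/block/md0/md/bitmap_set_bits
 */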

static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);

/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		return -EBUSY;

	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		return len;
	}
	major = simple_strtoul(buf, &e, 10);
	if (e==buf || *e != '.')
		return -EINVAL;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		return -EINVAL;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		return -ENOENT;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			type = "recover";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		flush_workqueue(md_misc_wq);
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev->pers->start_reshape(mddev);
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}
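
/*
 * Example (sketch): the periodic consistency check that many
 * distributions schedule from cron amounts to
 *
 *	echo check > /sys/block/md0/md/sync_action
 *
 * after which mismatch_cnt (below) reports how much inconsistency,
 * if any, was found.
 */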

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);

static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", mddev->last_sync_action);
}

static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)
		       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);

static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	int min;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_min = 0;
		return len;
	}
	min = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || min <= 0)
		return -EINVAL;
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	int max;
	char *e;
	if (strncmp(buf, "system", 6)==0) {
		mddev->sync_speed_max = 0;
		return len;
	}
	max = simple_strtoul(buf, &e, 10);
	if (buf == e || (*e && *e != '\n') || max <= 0)
		return -EINVAL;
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
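
/*
 * Note (sketch): both limits are in KiB/sec per device; writing the
 * literal string "system" reverts to the system-wide
 * speed_limit_min/speed_limit_max defaults.
 */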

static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
	long n;

	if (kstrtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);

static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
	unsigned long resync, dt, db;
	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
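
/*
 * Note (sketch): the resync counters used above are kept in 512-byte
 * sectors, so db/dt/2 converts the per-interval delta into KiB/sec.
 */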

static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (mddev->curr_resync == 1 ||
	    mddev->curr_resync == 2)
		return sprintf(page, "delayed\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);

static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	if (kstrtoull(buf, 10, &min))
		return -EINVAL;
	if (min > mddev->resync_max)
		return -EINVAL;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	/* Must be a multiple of chunk_size */
	if (mddev->chunk_sectors) {
		sector_t temp = min;
		if (sector_div(temp, mddev->chunk_sectors))
			return -EINVAL;
	}
	mddev->resync_min = min;

	return len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		if (kstrtoull(buf, 10, &max))
			return -EINVAL;
		if (max < mddev->resync_min)
			return -EINVAL;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			return -EBUSY;

		/* Must be a multiple of chunk_size */
		if (mddev->chunk_sectors) {
			sector_t temp = max;
			if (sector_div(temp, mddev->chunk_sectors))
				return -EINVAL;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	return len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);

static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_lo;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_lo = new;
	if (new >= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	unsigned long long old = mddev->suspend_hi;

	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		return -EINVAL;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;

	mddev->suspend_hi = new;
	if (new <= old)
		/* Shrinking suspended region */
		mddev->pers->quiesce(mddev, 2);
	else {
		/* Expanding suspended region - need to wait */
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	return len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
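
/*
 * Note (sketch): suspend_lo/suspend_hi fence a sector range of the
 * array against new I/O; user-space tools such as mdadm are understood
 * to use this to protect the critical window while data is being
 * relocated during a reshape.
 */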

static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	struct md_rdev *rdev;
	char *e;
	unsigned long long new = simple_strtoull(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;
	if (buf == e || (*e && *e != '\n'))
		return -EINVAL;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	return len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);

static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n",
		       mddev->reshape_backwards ? "backwards" : "forwards");
}

static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
	int backwards = 0;
	if (cmd_match(buf, "forwards"))
		backwards = 0;
	else if (cmd_match(buf, "backwards"))
		backwards = 1;
	else
		return -EINVAL;
	if (mddev->reshape_backwards == backwards)
		return len;

	/* check if we are allowed to change */
	if (mddev->delta_disks)
		return -EBUSY;

	if (mddev->persistent &&
	    mddev->major_version == 0)
		return -EINVAL;

	mddev->reshape_backwards = backwards;
	return len;
}

static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
       reshape_direction_store);
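
/*
 * Note (sketch): reshape_direction_store() above rejects a direction
 * change on persistent 0.90 (major_version 0) metadata, presumably
 * because that superblock format cannot describe a backwards reshape;
 * v1.x metadata is required for that case.
 */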

static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			return -EINVAL;
		if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			return -E2BIG;

		mddev->external_size = 1;
	}

	mddev->array_sectors = sectors;
	if (mddev->pers) {
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk(mddev->gendisk);
	}
	return len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_last_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};
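
/*
 * Note (sketch): md_default_attrs are registered for every array, while
 * md_redundancy_group is attached only when the active personality
 * provides ->sync_request (see level_store() above and md_run() below).
 */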

static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->show(mddev, page);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	if (entry->store == new_dev_store)
		flush_workqueue(md_misc_wq);
	rv = mddev_lock(mddev);
	if (!rv) {
		rv = entry->store(mddev, page, length);
		mddev_unlock(mddev);
	}
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);

	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static int md_alloc(dev_t dev, char *name)
{
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);
	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
		       disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		printk(KERN_DEBUG "pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, struct kernel_param *kp)
{
	/* val must be "md_*" where * is not all digits.
	 * We allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) != 0)
		return -EINVAL;
	return md_alloc(0, buf);
}
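
/*
 * Example (sketch, assuming this handler is registered as the
 * "new_array" module parameter elsewhere in this file):
 *
 *	echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * would end up calling md_alloc(0, "md_home") and create /dev/md_home.
 */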

static void md_safemode_timeout(unsigned long data)
{
	struct mddev *mddev = (struct mddev *) data;

	if (!atomic_read(&mddev->writes_pending)) {
		mddev->safemode = 1;
		if (mddev->external)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;

int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->meta_bdev) {
			/* Nothing to check */;
		} else if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				printk("md: %s: data overlaps metadata\n",
				       mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				printk("md: %s: metadata overlaps data\n",
				       mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	if (mddev->bio_set == NULL)
		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
			       mddev->level);
		else
			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
			       mddev->clevel);
		return -EINVAL;
	}
	mddev->pers = pers;
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		mddev->pers = NULL;
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		struct md_rdev *rdev2;
		int warned = 0;

		rdev_for_each(rdev, mddev)
			rdev_for_each(rdev2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					printk(KERN_WARNING
					       "%s: WARNING: %s appears to be"
					       " on the same physical disk as"
					       " %s.\n",
					       mdname(mddev),
					       bdevname(rdev->bdev,b),
					       bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}

		if (warned)
			printk(KERN_WARNING
			       "True protection against single-disk"
			       " failure might be compromised.\n");
	}

	mddev->recovery = 0;
	/* may be over-ridden by personality */
	mddev->resync_max_sectors = mddev->dev_sectors;

	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */

	err = mddev->pers->run(mddev);
	if (err)
		printk(KERN_ERR "md: pers->run() failed ...\n");
	else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
			  " but 'external_size' not in effect?\n", __func__);
		printk(KERN_ERR
		       "md: invalid array_size %llu > default size %llu\n",
		       (unsigned long long)mddev->array_sectors / 2,
		       (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
		err = -EINVAL;
		mddev->pers->stop(mddev);
	}
	if (err == 0 && mddev->pers->sync_request &&
	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
		err = bitmap_create(mddev);
		if (err) {
			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
			       mdname(mddev), err);
			mddev->pers->stop(mddev);
		}
	}
	if (err) {
		module_put(mddev->pers->owner);
		mddev->pers = NULL;
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_data = mddev;
		mddev->queue->backing_dev_info.congested_fn = md_congested;
		blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
	}
	if (mddev->pers->sync_request) {
		if (mddev->kobj.sd &&
		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			printk(KERN_WARNING
			       "md: cannot register extra attributes for %s\n",
			       mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->writes_pending,0);
	atomic_set(&mddev->max_corr_read_errors,
		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
	mddev->safemode = 0;
	mddev->safemode_timer.function = md_safemode_timeout;
	mddev->safemode_timer.data = (unsigned long) mddev;
	mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;
	smp_wmb();
	mddev->ready = 1;
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0)
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->flags & MD_UPDATE_SB_FLAGS)
		md_update_sb(mddev, 0);

	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	return 0;
}
EXPORT_SYMBOL_GPL(md_run);

static int do_md_run(struct mddev *mddev)
{
	int err;

	err = md_run(mddev);
	if (err)
		goto out;
	err = bitmap_load(mddev);
	if (err) {
		bitmap_destroy(mddev);
		goto out;
	}

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	mddev->changed = 1;
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
	return err;
}
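
/*
 * Note (sketch): md_run() is exported for external stacking users such
 * as dm-raid, which manage their own gendisk; do_md_run() is the native
 * md path and additionally loads the write-intent bitmap and publishes
 * the new capacity.
 */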

static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;
	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	printk(KERN_INFO "md: %s switched to read-write mode.\n",
		mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

static void md_clean(struct mddev *mddev)
{
	mddev->array_sectors = 0;
	mddev->external_size = 0;
	mddev->dev_sectors = 0;
	mddev->raid_disks = 0;
	mddev->recovery_cp = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->reshape_position = MaxSector;
	mddev->external = 0;
	mddev->persistent = 0;
	mddev->level = LEVEL_NONE;
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->ro = 0;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
	mddev->layout = 0;
	mddev->max_disks = 0;
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	atomic64_set(&mddev->resync_mismatches, 0);
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->merge_check_needed = 0;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
}

static void __md_stop_writes(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	flush_workqueue(md_misc_wq);
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	bitmap_flush(mddev);
	md_super_wait(mddev);

	if (mddev->ro == 0 &&
	    (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
		/* mark array as shutdown cleanly */
		mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock_nointr(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);

static void __md_stop(struct mddev *mddev)
{
	mddev->ready = 0;
	mddev->pers->stop(mddev);
	if (mddev->pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(mddev->pers->owner);
	mddev->pers = NULL;
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}

void md_stop(struct mddev *mddev)
{
	/* stop the array and free any attached data structures.
	 * This is called from dm-raid
	 */
	__md_stop(mddev);
	bitmap_destroy(mddev);
	if (mddev->bio_set)
		bioset_free(mddev->bio_set);
}

EXPORT_SYMBOL_GPL(md_stop);

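/*
 * Stop any resync/recovery and switch the array to read-only,
 * returning -EBUSY if somebody else still has the device open.
 */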
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
	int err = 0;
	int did_freeze = 0;

	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
		did_freeze = 1;
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	mddev_unlock(mddev);
	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
					  &mddev->recovery));
	mddev_lock_nointr(mddev);

	mutex_lock(&mddev->open_mutex);
	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
	    mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
		printk("md: %s still in use.\n",mdname(mddev));
		if (did_freeze) {
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		}
		err = -EBUSY;
		goto out;
	}
	if (mddev->pers) {
		__md_stop_writes(mddev);

		err  = -ENXIO;
		if (mddev->ro==1)
			goto out;
		mddev->ro = 1;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		err = 0;
	}
out:
	mutex_unlock(&mddev->open_mutex);
	return err;
}

/* mode:
 *   0 - completely stop and dis-assemble array
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(struct mddev *mddev, int mode,
		      struct block_device *bdev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;
	int did_freeze = 0;

	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
		did_freeze = 1;
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	mddev_unlock(mddev);
	wait_event(resync_wait, (mddev->sync_thread == NULL &&
				 !test_bit(MD_RECOVERY_RUNNING,
					   &mddev->recovery)));
	mddev_lock_nointr(mddev);

	mutex_lock(&mddev->open_mutex);
	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
	    mddev->sysfs_active ||
	    mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
		printk("md: %s still in use.\n",mdname(mddev));
		mutex_unlock(&mddev->open_mutex);
		if (did_freeze) {
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		}
		return -EBUSY;
	}
	if (mddev->pers) {
		if (mddev->ro)
			set_disk_ro(disk, 0);

		__md_stop_writes(mddev);
		__md_stop(mddev);
		mddev->queue->merge_bvec_fn = NULL;
		mddev->queue->backing_dev_info.congested_fn = NULL;

		/* tell userspace to handle 'inactive' */
		sysfs_notify_dirent_safe(mddev->sysfs_state);

		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0)
				sysfs_unlink_rdev(mddev, rdev);

		set_capacity(disk, 0);
		mutex_unlock(&mddev->open_mutex);
		mddev->changed = 1;
		revalidate_disk(disk);

		if (mddev->ro)
			mddev->ro = 0;
	} else
		mutex_unlock(&mddev->open_mutex);
	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));

		bitmap_destroy(mddev);
		if (mddev->bitmap_info.file) {
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
		}
		mddev->bitmap_info.offset = 0;

		export_array(mddev);

		md_clean(mddev);
		kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;
	}
	blk_integrity_unregister(disk);
	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

#ifndef MODULE
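/*
 * Try to run an array assembled from the devices on its disk list;
 * the array is stopped again if do_md_run() fails.
 */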
static void autorun_array(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	printk(KERN_INFO "md: running: ");

	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];
		printk("<%s>", bdevname(rdev->bdev,b));
	}
	printk("\n");

	err = do_md_run(mddev);
	if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, NULL);
	}
}

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct md_rdev *rdev0, *rdev, *tmp;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
					 struct md_rdev, same_set);

		printk(KERN_INFO "md: considering %s ...\n",
			bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				printk(KERN_INFO "md:  adding %s ...\n",
					bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev, NULL, NULL);
		mddev = mddev_find(dev);
		if (!mddev || !mddev->gendisk) {
			if (mddev)
				mddev_put(mddev);
			printk(KERN_ERR
				"md: cannot allocate memory for md drive.\n");
			break;
		}
		if (mddev_lock(mddev))
			printk(KERN_WARNING "md: %s locked, cannot run\n",
			       mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			printk(KERN_WARNING
				"md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			printk(KERN_INFO "md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	printk(KERN_INFO "md: ... autorun DONE.\n");
}
#endif /* !MODULE */

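/* RAID_VERSION ioctl: report the md driver version to user space */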
static int get_version(void __user *arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}

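/* GET_ARRAY_INFO ioctl: fill an mdu_array_info_t with the array state */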
static int get_array_info(struct mddev *mddev, void __user *arg)
{
	mdu_array_info_t info;
	int nr,working,insync,failed,spare;
	struct md_rdev *rdev;

	nr = working = insync = failed = spare = 0;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else
				spare++;
		}
	}
	rcu_read_unlock();

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime         = mddev->ctime;
	info.level         = mddev->level;
	info.size          = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks      = nr;
	info.raid_disks    = mddev->raid_disks;
	info.md_minor      = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime         = mddev->utime;
	info.state         = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_info.offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	info.active_disks  = insync;
	info.working_disks = working;
	info.failed_disks  = failed;
	info.spare_disks   = spare;

	info.layout        = mddev->layout;
	info.chunk_size    = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

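/* GET_BITMAP_FILE ioctl: report the path of the external bitmap file,
 * or an empty pathname when no file-backed bitmap is configured.
 */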
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	file = kmalloc(sizeof(*file), GFP_NOIO);

	if (!file)
		goto out;

	/* bitmap disabled, zero the first byte and copy out */
	if (!mddev->bitmap || !mddev->bitmap->storage.file) {
		file->pathname[0] = '\0';
		goto copy_out;
	}

	buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
	if (!buf)
		goto out;

	ptr = d_path(&mddev->bitmap->storage.file->f_path,
		     buf, sizeof(file->pathname));
	if (IS_ERR(ptr))
		goto out;

	strcpy(file->pathname, ptr);

copy_out:
	err = 0;
	if (copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;
out:
	kfree(buf);
	kfree(file);
	return err;
}

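/* GET_DISK_INFO ioctl: report the state of the member device with the
 * requested number, or MD_DISK_REMOVED if no such device exists.
 */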
static int get_disk_info(struct mddev *mddev, void __user * arg)
{
	mdu_disk_info_t info;
	struct md_rdev *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rcu_read_lock();
	rdev = find_rdev_nr_rcu(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}
	rcu_read_unlock();

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

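/*
 * ADD_NEW_DISK ioctl: add a device to an array that is being assembled,
 * hot-add a spare to a running array, or add a fresh device to a
 * version-0.90 array.
 */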
static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct md_rdev *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				printk(KERN_WARNING
					"md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			printk(KERN_WARNING
				"%s: personality does not support diskops!\n",
			       mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set saved_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC)  &&
			    info->raid_disk < mddev->raid_disks) {
				rdev->raid_disk = info->raid_disk;
				set_bit(In_sync, &rdev->flags);
				clear_bit(Bitmap_sync, &rdev->flags);
			} else
				rdev->raid_disk = -1;
			rdev->saved_raid_disk = rdev->raid_disk;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		if ((info->state & (1<<MD_DISK_SYNC)) &&
		     rdev->raid_disk != info->raid_disk) {
			/* This was a hot-add request, but events don't
			 * match, so reject it.
			 */
			export_rdev(rdev);
			return -EINVAL;
		}

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);
		if (!err && !mddev->pers->hot_remove_disk) {
			/* If there is hot_add_disk but no hot_remove_disk
			 * then added disks for geometry changes,
			 * and should be added immediately.
			 */
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
			err = mddev->pers->hot_add_disk(mddev, rdev);
			if (err)
				unbind_rdev_from_array(rdev);
		}
		if (err)
			export_rdev(rdev);
		else
			sysfs_notify_dirent_safe(rdev->sysfs_state);

		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (mddev->degraded)
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		if (!err)
			md_new_event(mddev);
		md_wakeup_thread(mddev->thread);
		return err;
	}

	/* otherwise, add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
		       mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_WARNING
				"md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);

		if (!mddev->persistent) {
			printk(KERN_INFO "md: nonpersistent superblock ...\n");
			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
		} else
			rdev->sb_start = calc_dev_sboffset(rdev);
		rdev->sectors = rdev->sb_start;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}

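/* HOT_REMOVE_DISK ioctl: detach a device that is no longer in active use */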
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	clear_bit(Blocked, &rdev->flags);
	remove_and_add_spares(mddev, rdev);

	if (rdev->raid_disk >= 0)
		goto busy;

	kick_rdev_from_array(rdev);
	md_update_sb(mddev, 1);
	md_new_event(mddev);

	return 0;
busy:
	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
		bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}

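/* HOT_ADD_DISK ioctl: add a new device to a running version-0.90 array */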
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
			" version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		printk(KERN_WARNING
			"%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		printk(KERN_WARNING
			"md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev);
	else
		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;

	rdev->sectors = rdev->sb_start;

	if (test_bit(Faulty, &rdev->flags)) {
		printk(KERN_WARNING
			"md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev,b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}
	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */

	rdev->raid_disk = -1;

	md_update_sb(mddev, 1);

	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}

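/* SET_BITMAP_FILE ioctl: attach (fd >= 0) or remove (fd < 0) a
 * file-backed write-intent bitmap while the array is quiesced.
 */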
static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err = 0;

	if (mddev->pers) {
		if (!mddev->pers->quiesce || !mddev->thread)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		struct inode *inode;
		if (mddev->bitmap)
			return -EEXIST; /* cannot add when bitmap is present */
		mddev->bitmap_info.file = fget(fd);

		if (mddev->bitmap_info.file == NULL) {
			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
			       mdname(mddev));
			return -EBADF;
		}

		inode = mddev->bitmap_info.file->f_mapping->host;
		if (!S_ISREG(inode->i_mode)) {
			printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
			       mdname(mddev));
			err = -EBADF;
		} else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
			printk(KERN_ERR "%s: error: bitmap file must open for write\n",
			       mdname(mddev));
			err = -EBADF;
		} else if (atomic_read(&inode->i_writecount) != 1) {
			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
			       mdname(mddev));
			err = -EBUSY;
		}
		if (err) {
			fput(mddev->bitmap_info.file);
			mddev->bitmap_info.file = NULL;
			return err;
		}
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		mddev->pers->quiesce(mddev, 1);
		if (fd >= 0) {
			err = bitmap_create(mddev);
			if (!err)
				err = bitmap_load(mddev);
		}
		if (fd < 0 || err) {
			bitmap_destroy(mddev);
			fd = -1; /* make sure to put the file */
		}
		mddev->pers->quiesce(mddev, 0);
	}
	if (fd < 0) {
		if (mddev->bitmap_info.file)
			fput(mddev->bitmap_info.file);
		mddev->bitmap_info.file = NULL;
	}

	return err;
}

/*
 * set_array_info is used in two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent,layout,chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{

	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			printk(KERN_INFO
				"md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		/* ensure mddev_put doesn't delete this now that there
		 * is some minimal configuration.
		 */
		mddev->ctime         = get_seconds();
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime         = get_seconds();

	mddev->level         = info->level;
	mddev->clevel[0]     = 0;
	mddev->dev_sectors   = 2 * (sector_t)info->size;
	mddev->raid_disks    = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent    = ! info->not_persistent;
	mddev->external	     = 0;

	mddev->layout        = info->layout;
	mddev->chunk_sectors = info->chunk_size >> 9;

	mddev->max_disks     = MD_SB_DISKS;

	if (mddev->persistent)
		mddev->flags         = 0;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
	mddev->bitmap_info.offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;

	return 0;
}

void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);

static int update_size(struct mddev *mddev, sector_t num_sectors)
{
	struct md_rdev *rdev;
	int rv;
	int fit = (num_sectors == 0);

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->sync_thread)
		return -EBUSY;
	if (mddev->ro)
		return -EROFS;

	rdev_for_each(rdev, mddev) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv)
		revalidate_disk(mddev->gendisk);
	return rv;
}

static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
	int rv;
	struct md_rdev *rdev;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (mddev->ro)
		return -EROFS;
	if (raid_disks <= 0 ||
	    (mddev->max_disks && raid_disks >= mddev->max_disks))
		return -EINVAL;
	if (mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->reshape_position != MaxSector)
		return -EBUSY;

	rdev_for_each(rdev, mddev) {
		if (mddev->raid_disks < raid_disks &&
		    rdev->data_offset < rdev->new_data_offset)
			return -EINVAL;
		if (mddev->raid_disks > raid_disks &&
		    rdev->data_offset > rdev->new_data_offset)
			return -EINVAL;
	}

	mddev->delta_disks = raid_disks - mddev->raid_disks;
	if (mddev->delta_disks < 0)
		mddev->reshape_backwards = 1;
	else if (mddev->delta_disks > 0)
		mddev->reshape_backwards = 0;

	rv = mddev->pers->check_reshape(mddev);
	if (rv < 0) {
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
	return rv;
}

/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state, ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_info.offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime         != info->ctime         ||
	    mddev->level         != info->level         ||
/*	    mddev->layout        != info->layout        || */
	    !mddev->persistent	 != info->not_persistent||
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->check_reshape == NULL)
			return -EINVAL;
		else {
			mddev->new_layout = info->layout;
			rv = mddev->pers->check_reshape(mddev);
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks    != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
			return -EINVAL;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			/* add the bitmap */
			if (mddev->bitmap)
				return -EEXIST;
			if (mddev->bitmap_info.default_offset == 0)
				return -EINVAL;
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
			mddev->pers->quiesce(mddev, 1);
			rv = bitmap_create(mddev);
			if (!rv)
				rv = bitmap_load(mddev);
			if (rv)
				bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap)
				return -ENOENT;
			if (mddev->bitmap->storage.file)
				return -EINVAL;
			mddev->pers->quiesce(mddev, 1);
			bitmap_destroy(mddev);
			mddev->pers->quiesce(mddev, 0);
			mddev->bitmap_info.offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
}
6165
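/* SET_DISK_FAULTY ioctl: simulate a failure of the given member device */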
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
L
6167
	struct md_rdev *rdev;
6168
	int err = 0;
L
Linus Torvalds 已提交
6169 6170 6171 6172

	if (mddev->pers == NULL)
		return -ENODEV;

6173 6174
	rcu_read_lock();
	rdev = find_rdev_rcu(mddev, dev);
L
Linus Torvalds 已提交
6175
	if (!rdev)
6176 6177 6178 6179 6180 6181 6182 6183
		err =  -ENODEV;
	else {
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags))
			err = -EBUSY;
	}
	rcu_read_unlock();
	return err;
L
Linus Torvalds 已提交
6184 6185
}

/*
 * We have a problem here: there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mddev *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = mddev->array_sectors / 8;
	return 0;
}

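/* The set of ioctl commands that the md driver accepts */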
static inline bool md_ioctl_valid(unsigned int cmd)
{
	switch (cmd) {
	case ADD_NEW_DISK:
	case BLKROSET:
	case GET_ARRAY_INFO:
	case GET_BITMAP_FILE:
	case GET_DISK_INFO:
	case HOT_ADD_DISK:
	case HOT_REMOVE_DISK:
	case RAID_AUTORUN:
	case RAID_VERSION:
	case RESTART_ARRAY_RW:
	case RUN_ARRAY:
	case SET_ARRAY_INFO:
	case SET_BITMAP_FILE:
	case SET_DISK_FAULTY:
	case STOP_ARRAY:
	case STOP_ARRAY_RO:
		return true;
	default:
		return false;
	}
}

static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	struct mddev *mddev = NULL;
	int ro;

	if (!md_ioctl_valid(cmd))
		return -ENOTTY;

	switch (cmd) {
	case RAID_VERSION:
	case GET_ARRAY_INFO:
	case GET_DISK_INFO:
		break;
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd) {
	case RAID_VERSION:
		err = get_version(argp);
		goto out;

#ifndef MODULE
	case RAID_AUTORUN:
		err = 0;
		autostart_arrays(arg);
		goto out;
#endif
	default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto out;
	}

	/* Some actions do not require the mutex */
	switch (cmd) {
	case GET_ARRAY_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_array_info(mddev, argp);
		goto out;

	case GET_DISK_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_disk_info(mddev, argp);
		goto out;

	case SET_DISK_FAULTY:
		err = set_disk_faulty(mddev, new_decode_dev(arg));
		goto out;
	}

	if (cmd == ADD_NEW_DISK)
		/* need to ensure md_delayed_delete() has completed */
		flush_workqueue(md_misc_wq);

	if (cmd == HOT_REMOVE_DISK)
		/* need to ensure recovery thread has run */
		wait_event_interruptible_timeout(mddev->sb_wait,
						 !test_bit(MD_RECOVERY_NEEDED,
							   &mddev->flags),
						 msecs_to_jiffies(5000));
	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
		/* Need to flush page cache, and ensure no-one else opens
		 * and writes
		 */
		mutex_lock(&mddev->open_mutex);
		if (mddev->pers && atomic_read(&mddev->openers) > 1) {
			mutex_unlock(&mddev->open_mutex);
			err = -EBUSY;
			goto out;
		}
		set_bit(MD_STILL_CLOSED, &mddev->flags);
		mutex_unlock(&mddev->open_mutex);
		sync_blockdev(bdev);
	}
	err = mddev_lock(mddev);
	if (err) {
		printk(KERN_INFO
			"md: ioctl lock interrupted, reason %d, cmd %d\n",
			err, cmd);
		goto out;
	}

	if (cmd == SET_ARRAY_INFO) {
		mdu_array_info_t info;
		if (!arg)
			memset(&info, 0, sizeof(info));
		else if (copy_from_user(&info, argp, sizeof(info))) {
			err = -EFAULT;
			goto unlock;
		}
		if (mddev->pers) {
			err = update_array_info(mddev, &info);
			if (err) {
				printk(KERN_WARNING "md: couldn't update"
				       " array info. %d\n", err);
				goto unlock;
			}
			goto unlock;
		}
		if (!list_empty(&mddev->disks)) {
			printk(KERN_WARNING
			       "md: array %s already has disks!\n",
			       mdname(mddev));
			err = -EBUSY;
			goto unlock;
		}
		if (mddev->raid_disks) {
			printk(KERN_WARNING
			       "md: array %s already initialised!\n",
			       mdname(mddev));
			err = -EBUSY;
			goto unlock;
		}
		err = set_array_info(mddev, &info);
		if (err) {
			printk(KERN_WARNING "md: couldn't set"
			       " array info. %d\n", err);
			goto unlock;
		}
		goto unlock;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd) {
	case GET_BITMAP_FILE:
		err = get_bitmap_file(mddev, argp);
		goto unlock;

	case RESTART_ARRAY_RW:
		err = restart_array(mddev);
		goto unlock;

	case STOP_ARRAY:
		err = do_md_stop(mddev, 0, bdev);
		goto unlock;

	case STOP_ARRAY_RO:
		err = md_set_readonly(mddev, bdev);
		goto unlock;

	case HOT_REMOVE_DISK:
		err = hot_remove_disk(mddev, new_decode_dev(arg));
		goto unlock;

	case ADD_NEW_DISK:
		/* We can support ADD_NEW_DISK on read-only arrays
		 * only if we are re-adding a preexisting device.
		 * So require mddev->pers and MD_DISK_SYNC.
		 */
		if (mddev->pers) {
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else if (!(info.state & (1<<MD_DISK_SYNC)))
				/* Need to clear read-only for this */
				break;
			else
				err = add_new_disk(mddev, &info);
			goto unlock;
		}
		break;

	case BLKROSET:
		if (get_user(ro, (int __user *)(arg))) {
			err = -EFAULT;
			goto unlock;
		}
		err = -EINVAL;

		/* if the bdev is going readonly the value of mddev->ro
		 * does not matter, no writes are coming
		 */
		if (ro)
			goto unlock;

		/* are we already prepared for writes? */
		if (mddev->ro != 1)
			goto unlock;

		/* transitioning to readauto needs only happen for
		 * arrays that call md_write_start
		 */
		if (mddev->pers) {
			err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		}
		goto unlock;
	}
	/*
	 * The remaining ioctls are changing the state of the
6457
	 * superblock, so we do not allow them on read-only arrays.
L
Linus Torvalds 已提交
6458
	 */
6459
	if (mddev->ro && mddev->pers) {
6460 6461
		if (mddev->ro == 2) {
			mddev->ro = 0;
N
NeilBrown 已提交
6462
			sysfs_notify_dirent_safe(mddev->sysfs_state);
6463
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6464 6465 6466 6467 6468 6469 6470 6471 6472
			/* mddev_unlock will wake thread */
			/* If a device failed while we were read-only, we
			 * need to make sure the metadata is updated now.
			 */
			if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
				mddev_unlock(mddev);
				wait_event(mddev->sb_wait,
					   !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
					   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6473
				mddev_lock_nointr(mddev);
6474
			}
6475 6476
		} else {
			err = -EROFS;
6477
			goto unlock;
6478
		}
L
Linus Torvalds 已提交
6479 6480
	}

6481 6482
	switch (cmd) {
	case ADD_NEW_DISK:
L
Linus Torvalds 已提交
6483
	{
6484 6485 6486 6487 6488
		mdu_disk_info_t info;
		if (copy_from_user(&info, argp, sizeof(info)))
			err = -EFAULT;
		else
			err = add_new_disk(mddev, &info);
6489
		goto unlock;
6490
	}
L
Linus Torvalds 已提交
6491

6492 6493
	case HOT_ADD_DISK:
		err = hot_add_disk(mddev, new_decode_dev(arg));
6494
		goto unlock;
L
Linus Torvalds 已提交
6495

6496 6497
	case RUN_ARRAY:
		err = do_md_run(mddev);
6498
		goto unlock;
L
Linus Torvalds 已提交
6499

6500 6501
	case SET_BITMAP_FILE:
		err = set_bitmap_file(mddev, (int)arg);
6502
		goto unlock;
6503

6504 6505
	default:
		err = -EINVAL;
6506
		goto unlock;
L
Linus Torvalds 已提交
6507 6508
	}

6509
unlock:
6510 6511 6512
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
L
Linus Torvalds 已提交
6513
	mddev_unlock(mddev);
6514
out:
L
Linus Torvalds 已提交
6515 6516
	return err;
}
6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case HOT_REMOVE_DISK:
	case HOT_ADD_DISK:
	case SET_DISK_FAULTY:
	case SET_BITMAP_FILE:
		/* These take in integer arg, do not convert */
		break;
	default:
		arg = (unsigned long)compat_ptr(arg);
		break;
	}

	return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	struct mddev *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (!mddev)
		return -ENODEV;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		flush_workqueue(md_misc_wq);
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	err = 0;
	atomic_inc(&mddev->openers);
	clear_bit(MD_STILL_CLOSED, &mddev->flags);
	mutex_unlock(&mddev->open_mutex);

	check_disk_change(bdev);
 out:
	return err;
}

static void md_release(struct gendisk *disk, fmode_t mode)
{
	struct mddev *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);
}

static int md_media_changed(struct gendisk *disk)
{
	struct mddev *mddev = disk->private_data;

	return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
	struct mddev *mddev = disk->private_data;

	mddev->changed = 0;
	return 0;
}
static const struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= md_compat_ioctl,
#endif
	.getgeo		= md_getgeo,
	.media_changed  = md_media_changed,
	.revalidate_disk= md_revalidate,
};

static int md_thread(void *arg)
{
	struct md_thread *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (!kthread_should_stop())
			thread->run(thread);
	}

	return 0;
}

void md_wakeup_thread(struct md_thread *thread)
{
	if (thread) {
		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
EXPORT_SYMBOL(md_wakeup_thread);

struct md_thread *md_register_thread(void (*run) (struct md_thread *),
		struct mddev *mddev, const char *name)
{
	struct md_thread *thread;

	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
				  name);
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
EXPORT_SYMBOL(md_register_thread);

void md_unregister_thread(struct md_thread **threadp)
{
	struct md_thread *thread = *threadp;
	if (!thread)
		return;
	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
	/* Locking ensures that mddev_unlock does not wake_up a
	 * non-existent thread
	 */
	spin_lock(&pers_lock);
	*threadp = NULL;
	spin_unlock(&pers_lock);

	kthread_stop(thread->tsk);
	kfree(thread);
}
EXPORT_SYMBOL(md_unregister_thread);

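/*
 * Report a device failure to the personality and kick off recovery;
 * this may be called from interrupt context.
 */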
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (!mddev->pers || !mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev,rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
	md_new_event_inintr(mddev);
}
EXPORT_SYMBOL(md_error);

/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	struct md_rdev *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			      bdevname(rdev->bdev,b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}

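/* Emit the /proc/mdstat progress bar and rate estimate for a resync */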
static void status_resync(struct seq_file *seq, struct mddev *mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db;
	sector_t rt;
	int scale;
	unsigned int per_milli;

	if (mddev->curr_resync <= 3)
		resync = 0;
	else
		resync = mddev->curr_resync
			- atomic_read(&mddev->recovery_active);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	WARN_ON(max_sectors == 0);
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while ( max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, so could be 32bit or 64bit.
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to 'db'.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt) dt++;
	db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
		- mddev->resync_mark_cnt;

	rt = max_sectors - resync;    /* number of remaining sectors */
	sector_div(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
}

static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	struct mddev *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp,&all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, struct mddev, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2;/* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	struct mddev *next_mddev, *mddev = v;

L
	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
6874
		next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
L
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
6878
	}
L
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;

}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

static int md_seq_show(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;
	sector_t sectors;
	struct md_rdev *rdev;

	if (v == (void*)1) {
		struct md_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		seq->poll_event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
						mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		rdev_for_each(rdev, mddev) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			}
			if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			if (test_bit(Replacement, &rdev->flags))
				seq_printf(seq, "(R)");
			sectors += rdev->sectors;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync(seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync >= 1)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		bitmap_status(seq, mddev->bitmap);

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}

static const struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int error;

	error = seq_open(file, &md_seq_ops);
	if (error)
		return error;

	seq = file->private_data;
	seq->poll_event = atomic_read(&md_event_count);
	return error;
}

static int md_unloading;
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *seq = filp->private_data;
	int mask;

	if (md_unloading)
		return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
	poll_wait(filp, &md_event_waiters, wait);
	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (seq->poll_event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
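
/*
 * A hedged userspace sketch, quoted as a comment since it cannot live
 * in kernel code, of how a monitor consumes the poll support above:
 * reading /proc/mdstat latches the current event count into
 * seq->poll_event, and poll() then blocks until md_new_event() bumps
 * md_event_count.
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .events = POLLPRI };
 *
 *	pfd.fd = open("/proc/mdstat", O_RDONLY);
 *	read(pfd.fd, buf, sizeof(buf));		// latch current state
 *	poll(&pfd, 1, -1);			// sleep until an md event
 *	lseek(pfd.fd, 0, SEEK_SET);		// re-read to latch again
 *	read(pfd.fd, buf, sizeof(buf));
 */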

int register_md_personality(struct md_personality *p)
{
	printk(KERN_INFO "md: %s personality registered for level %d\n",
						p->name, p->level);
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(register_md_personality);

int unregister_md_personality(struct md_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_personality);

static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
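
/*
 * A brief sketch of the other half of the accounting used above: a
 * personality credits sync_io via md_sync_acct() (declared in md.h)
 * before submitting resync I/O, so the subtraction in is_mddev_idle()
 * cancels resync traffic out and only external I/O makes the array
 * look busy.  The function name here is illustrative only.
 */
static inline void example_submit_sync_bio(struct md_rdev *rdev,
					   struct bio *bio)
{
	md_sync_acct(rdev->bdev, bio_sectors(bio));
	generic_make_request(bio);
}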

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		/* stop recovery, signal do_sync .... */
	}
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			set_bit(MD_CHANGE_PENDING, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock(&mddev->lock);
	}
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
EXPORT_SYMBOL(md_write_start);

void md_write_end(struct mddev *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
EXPORT_SYMBOL(md_write_end);
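
/*
 * A minimal sketch (not a real personality; the name is illustrative)
 * of how the two calls above bracket every array write:
 * md_write_start() may block until the superblock marks the array
 * active, and md_write_end(), called once the write has completed,
 * re-arms the safemode timer.
 */
static inline void example_write_request(struct mddev *mddev,
					 struct bio *bio)
{
	md_write_start(mddev, bio);	/* returns at once for reads */
	/* ... the personality maps and submits the bio here ... */
	md_write_end(mddev);		/* on write completion */
}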

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock(&mddev->lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock(&mddev->lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	} else
		spin_unlock(&mddev->lock);

	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
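
/*
 * A hedged sketch of the calling pattern described above; the helper
 * name is illustrative.  A personality holding the mddev lock calls
 * md_allow_write() before a GFP_KERNEL allocation; -EAGAIN is benign
 * and only tells the caller that external metadata still has to
 * acknowledge the transition.
 */
static inline void *example_alloc_under_mddev_lock(struct mddev *mddev,
						   size_t size)
{
	int err = md_allow_write(mddev);

	if (err && err != -EAGAIN)
		return NULL;	/* only -EAGAIN is expected here */
	return kzalloc(size, GFP_KERNEL);
}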

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors, recovery_done;
	unsigned long mark[SYNC_MARKS];
	unsigned long update_time;
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc, *action = NULL;
	struct blk_plug plug;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) {/* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			desc = "data-check";
			action = "check";
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			desc = "requested-resync";
			action = "repair";
		} else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	mddev->last_sync_action = action ?: desc;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */
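
	/*
	 * Illustrative walk-through (not from a real trace): arrays A and
	 * B share a disk and both want to resync.  Each sets curr_resync
	 * to 2; A has the lower mddev address, so on seeing B also at 2
	 * it drops its curr_resync to 1 and sleeps on resync_wait.  B
	 * proceeds; when it finishes (or yields), A is woken, jumps back
	 * to try_again, finds no conflict and starts its own resync.
	 */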

	do {
		mddev->curr_resync = 2;

	try_again:
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();

		/* If there is a bitmap, we need to make sure all
		 * writes that started before we added a spare
		 * complete before we start doing a recovery.
		 * Otherwise the write might complete and (via
		 * bitmap_endwrite) set a bit in the bitmap after the
		 * recovery has checked that bit and skipped that
		 * region.
		 */
		if (mddev->bitmap) {
			mddev->pers->quiesce(mddev, 1);
			mddev->pers->quiesce(mddev, 0);
		}
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
		" %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
		window/2, (unsigned long long)max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	} else
		mddev->curr_resync = 3; /* no longer delayed */
	mddev->curr_resync_completed = j;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	md_new_event(mddev);
	update_time = jiffies;

	blk_start_plug(&plug);
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed = j;
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
			    j > mddev->recovery_cp)
				mddev->recovery_cp = j;
			update_time = jiffies;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max &&
		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || test_bit(MD_RECOVERY_INTR,
							     &mddev->recovery));
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						  currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			break;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		j += sectors;
		if (j > 2)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		/*
		 * this loop exits only if either when we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
	       test_bit(MD_RECOVERY_INTR, &mddev->recovery)
	       ? "interrupted" : "done");
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
	blk_finish_plug(&plug);
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					if (test_bit(MD_RECOVERY_ERROR,
						&mddev->recovery))
						mddev->recovery_cp =
							mddev->curr_resync_completed;
					else
						mddev->recovery_cp =
							mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rcu_read_lock();
			rdev_for_each_rcu(rdev, mddev)
				if (rdev->raid_disk >= 0 &&
				    mddev->delta_disks >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
			rcu_read_unlock();
		}
	}
 skip:
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;
}
EXPORT_SYMBOL_GPL(md_do_sync);

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this)
{
	struct md_rdev *rdev;
	int spares = 0;
	int removed = 0;

	rdev_for_each(rdev, mddev)
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev) == 0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->raid_disk = -1;
				removed++;
			}
		}
	if (removed && mddev->kobj.sd)
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	if (this)
		goto no_add;

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0)
			continue;
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (mddev->ro &&
		    ! (rdev->saved_raid_disk >= 0 &&
		       !test_bit(Bitmap_sync, &rdev->flags)))
			continue;

		if (rdev->saved_raid_disk < 0)
			rdev->recovery_offset = 0;
		if (mddev->pers->
		    hot_add_disk(mddev, rdev) == 0) {
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
			spares++;
			md_new_event(mddev);
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
	return spares;
}

static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	if (!mddev->sync_thread) {
		printk(KERN_ERR "%s: could not start resync"
		       " thread...\n",
		       mdname(mddev));
		/* leave the spares where they are, it shouldn't hurt */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		wake_up(&resync_wait);
		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
				       &mddev->recovery))
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
	} else
		md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap that thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock(&mddev->lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock(&mddev->lock);
			if (did_change)
				sysfs_notify_dirent_safe(mddev->sysfs_state);
		}

		if (mddev->flags & MD_UPDATE_SB_FLAGS)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		mddev->curr_resync_completed = 0;
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto not_running;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto not_running;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto not_running;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			INIT_WORK(&mddev->del_work, md_start_sync);
			queue_work(md_misc_wq, &mddev->del_work);
			goto unlock;
		}
	not_running:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			wake_up(&resync_wait);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
	unlock:
		wake_up(&mddev->sb_wait);
		mddev_unlock(mddev);
	}
}
EXPORT_SYMBOL(md_check_recovery);
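
/*
 * A minimal sketch of how every personality thread is expected to use
 * the entry point above; the function name is illustrative (see the
 * raid1d/raid5d loops for real callers).  Each wakeup first lets the
 * core handle superblock updates and resync housekeeping, then does
 * the personality's own work.
 */
static inline void example_personality_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;

	md_check_recovery(mddev);
	/* ... service the personality's pending I/O here ... */
}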

void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		}
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);

	/* If array is no-longer degraded, then any saved_raid_disk
	 * information must be scrapped.
	 */
	if (!mddev->degraded)
		rdev_for_each(rdev, mddev)
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

void md_finish_reshape(struct mddev *mddev)
{
	/* called by personality module when reshape completes. */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management.
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 *  A 'shift' can be set so that larger blocks are tracked and
 *  consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
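
/*
 * A self-checking sketch of the 64bit entry layout documented above,
 * using the BB_MAKE/BB_OFFSET/BB_LEN/BB_ACK helpers from md.h.  The
 * function and values are illustrative only.
 */
static inline void bb_entry_layout_demo(void)
{
	u64 e = BB_MAKE(4096, 8, 1);	/* 8 bad sectors at sector 4096, acked */

	BUG_ON(BB_OFFSET(e) != 4096);	/* 54-bit start, bits 9-62 */
	BUG_ON(BB_LEN(e) != 8);		/* length 1-512 stored as 0-511 */
	BUG_ON(!BB_ACK(e));		/* acknowledged: most significant bit */
}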

/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;
		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);
		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);
		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);
		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi' */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;
			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}

int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
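
/*
 * A hedged sketch of the caller's contract, mirroring how the RAID
 * personalities use it (the function name is illustrative): on a
 * failed write, first try to record a bad-block range so only that
 * region is lost; only when the table is full must the whole device
 * be failed via md_error().
 */
static inline void example_handle_write_error(struct mddev *mddev,
					      struct md_rdev *rdev,
					      sector_t sector, int sectors)
{
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(mddev, rdev);	/* no room left: kick the device */
}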

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now low doesn't overlap, so.. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
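
/*
 * A hedged usage example; the per-rdev sysfs paths and the underscored
 * attribute names are assumptions about the usual layout
 * (/sys/block/mdX/md/dev-YYY/):
 *
 *	# cat /sys/block/md0/md/dev-sda1/bad_blocks
 *	# echo "2048 16" > /sys/block/md0/md/dev-sda1/bad_blocks
 *
 * The write records 16 sectors starting at sector 2048 as acknowledged
 * bad blocks; the same write to unacknowledged_bad_blocks records them
 * without the acknowledged bit, which is mainly useful for testing.
 */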

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}

static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
						i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}

	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);