/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

29 30
static int raid0_congested(void *data, int bits)
{
31
	struct mddev *mddev = data;
32
	struct r0conf *conf = mddev->private;
33
	struct md_rdev **devlist = conf->devlist;
34
	int raid_disks = conf->strip_zone[0].nb_dev;
35 36
	int i, ret = 0;

37 38 39
	if (mddev_congested(mddev, bits))
		return 1;

40
	for (i = 0; i < raid_disks && !ret ; i++) {
41
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
42 43 44 45 46 47

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * inform the user of the raid configuration
 *
 * Prints one entry per strip zone: the member devices it spans, then the
 * zone's offset within the array, its offset within each device, and its
 * size.  Sector counts are shifted right by one to print KB.
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
	       mdname(mddev),
	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		printk(KERN_INFO "md: zone%d=[", j);
		/* devlist is laid out as nr_strip_zones rows of raid_disks */
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			printk(KERN_CONT "%s%s", k?"/":"",
			bdevname(conf->devlist[j*raid_disks
						+ k]->bdev, b));
		printk(KERN_CONT "]\n");

		/* zone_end is cumulative across zones, so size is the delta */
		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		printk(KERN_INFO "      zone-offset=%10lluKB, "
				"device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
	printk(KERN_INFO "\n");
}

/*
 * Build the strip-zone map for a raid0 array.
 *
 * raid0 can stripe across devices of different sizes by dividing the
 * array into "zones": zone 0 spans all devices up to the capacity of the
 * smallest, the next zone spans the leftover space of the remaining
 * (larger) devices, and so on.  The number of zones therefore equals the
 * number of distinct device sizes (after rounding each device down to a
 * whole number of chunks).
 *
 * On success, stores the new conf in *private_conf and returns 0.
 * On failure, stores ERR_PTR(err) in *private_conf and returns a
 * negative errno.
 */
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	bool discard_supported = false;

	if (!conf)
		return -ENOMEM;
	/*
	 * First pass: count the zones.  Each device whose (chunk-rounded)
	 * size is not matched by any earlier device starts a new zone.
	 */
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
	err = -ENOMEM;
	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	/* devlist is a nr_strip_zones x raid_disks array of rdev pointers */
	conf->devlist = kzalloc(sizeof(struct md_rdev*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0 || j >= mddev->raid_disks) {
			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
			       "aborting!\n", mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		disk_stack_limits(mddev->gendisk, rdev1->bdev,
				  rdev1->data_offset << 9);

		/* remember whether any member needs per-device bvec merging */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
			conf->has_merge_bvec = 1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;

		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
			discard_supported = true;
	}
	if (cnt != mddev->raid_disks) {
		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		/* this zone starts where the previous smallest device ended */
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		/* collect every device large enough to reach into this zone */
		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}
	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
		       mdname(mddev),
		       mddev->chunk_sectors << 9);
		goto abort;
	}

	blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
	blk_queue_io_opt(mddev->queue,
			 (mddev->chunk_sectors << 9) * mddev->raid_disks);

	if (!discard_supported)
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351
/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * remaps the bio to the target device. we separate two flows.
 * power 2 flow and a general flow for the sake of performance
 *
 * @sector: offset within the whole array, used to pick the member device.
 * @sector_offset: in: offset within the zone (from find_zone());
 *	out: offset within the chosen device, relative to zone->dev_start.
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else{
		/* sector_div() divides in place and returns the remainder */
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	*  position the bio over the real device
	*  real sector = chunk in device + starting of zone
	*	+ the position in the chunk
	*/
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	/* round-robin across the zone's row in the devlist matrix */
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r0conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	sector_t sector_offset = sector;
	int max;
	unsigned int chunk_sectors = mddev->chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct strip_zone *zone;
	struct md_rdev *rdev;
	struct request_queue *subq;

	/* bytes remaining in the current chunk after this bio ends */
	if (is_power_of_2(chunk_sectors))
		max =  (chunk_sectors - ((sector & (chunk_sectors-1))
						+ bio_sectors)) << 9;
	else
		max =  (chunk_sectors - (sector_div(sector, chunk_sectors)
						+ bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	if (max < biovec->bv_len)
		/* too small already, no need to check further */
		return max;
	if (!conf->has_merge_bvec)
		return max;

	/* May need to check subordinate device */
	sector = sector_offset;
	zone = find_zone(mddev->private, &sector_offset);
	rdev = map_sector(mddev, zone, sector, &sector_offset);
	subq = bdev_get_queue(rdev->bdev);
	if (subq->merge_bvec_fn) {
		/* rewrite bvm to the member device's coordinates and ask it */
		bvm->bi_bdev = rdev->bdev;
		bvm->bi_sector = sector_offset + zone->dev_start +
			rdev->data_offset;
		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
	} else
		return max;
}

405
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
406 407
{
	sector_t array_sectors = 0;
408
	struct md_rdev *rdev;
409 410 411 412

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

N
NeilBrown 已提交
413
	rdev_for_each(rdev, mddev)
414 415
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));
416 417 418 419

	return array_sectors;
}

/* forward declaration: raid0_run()'s error path needs it */
static int raid0_stop(struct mddev *mddev);

/*
 * Start a raid0 array: validate the chunk size, configure the request
 * queue limits, build the strip zones (unless a takeover already built
 * them), publish the array size and install the merge_bvec callback.
 * Returns 0 on success or a negative errno.
 */
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
		       mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	/* cap single requests at one chunk */
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
	blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
	       mdname(mddev),
	       (unsigned long long)mddev->array_sectors);
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe. i.e. number of devices
	 * multiplied by chunk size times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		raid0_stop(mddev);

	return ret;
}

/*
 * Tear down the array: free the conf allocated by create_strip_zones()
 * after making sure the queue no longer references it.
 */
static int raid0_stop(struct mddev *mddev)
{
	struct r0conf *conf = mddev->private;

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

491 492 493
/*
 * Is io distribute over 1 or more chunks ?
*/
494
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
495 496
			unsigned int chunk_sects, struct bio *bio)
{
N
NeilBrown 已提交
497
	if (likely(is_power_of_2(chunk_sects))) {
498 499 500 501 502 503 504 505 506
		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
					+ (bio->bi_size >> 9));
	} else{
		sector_t sector = bio->bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ (bio->bi_size >> 9));
	}
}

/*
 * Main I/O entry point: remap @bio onto the member device that holds its
 * data.  FLUSH requests are handed straight to the md core; a bio that
 * crosses a chunk boundary is split in two and each half is resubmitted
 * recursively.
 */
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	unsigned int chunk_sects;
	sector_t sector_offset;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

	chunk_sects = mddev->chunk_sectors;
	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
		sector_t sector = bio->bi_sector;
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		if (likely(is_power_of_2(chunk_sects)))
			bp = bio_split(bio, chunk_sects - (sector &
							   (chunk_sects-1)));
		else
			bp = bio_split(bio, chunk_sects -
				       sector_div(sector, chunk_sects));
		/* each half now fits inside a chunk; handle it recursively */
		raid0_make_request(mddev, &bp->bio1);
		raid0_make_request(mddev, &bp->bio2);
		bio_pair_release(bp);
		return;
	}

	/* find the zone, the device within it, and the device-local offset */
	sector_offset = bio->bi_sector;
	zone = find_zone(mddev->private, &sector_offset);
	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
			     &sector_offset);
	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = sector_offset + zone->dev_start +
		tmp_dev->data_offset;

	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
		/* Just ignore it */
		bio_endio(bio, 0);
		return;
	}

	generic_make_request(bio);
	return;

bad_map:
	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
	       " or bigger than %dk %llu %d\n",
	       mdname(mddev), chunk_sects / 2,
	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return;
}
N
NeilBrown 已提交
569

570
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
L
Linus Torvalds 已提交
571
{
572
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
L
Linus Torvalds 已提交
573 574 575
	return;
}

/*
 * Convert a degraded raid4/5 array to raid0.
 *
 * Requires exactly one failed disk, and that disk must be the parity
 * disk (no remaining member may occupy slot raid_disks-1); the surviving
 * data disks then already form a raid0 layout.  Returns the new private
 * conf, or an ERR_PTR on failure.
 */
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
		       mdname(mddev),
		       mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	/* on failure this stores an ERR_PTR in priv_conf, which we return */
	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

/*
 * Convert a raid10 array to raid0.
 *
 * Only the near-2 layout with no far copies is supported, and exactly
 * one disk of every mirror pair must already be failed, so the
 * survivors hold one full copy of the data in raid0 order.  Returns the
 * new private conf, or an ERR_PTR on failure.
 */
static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takover layout: 0x%x\n",
		       mdname(mddev),
		       mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takover Raid10 with odd disk number.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	/* on failure this stores an ERR_PTR in priv_conf, which we return */
	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

/*
 * Convert a raid1 array with a single surviving disk to raid0.
 *
 * raid1 has no chunk size, so pick the largest power-of-two chunk (at
 * most 64K, at least PAGE_SIZE) that evenly divides the array size.
 * Returns the new private conf, or an ERR_PTR on failure.
 */
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;

	/* on failure this stores an ERR_PTR in priv_conf, which we return */
	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

/*
 * Personality takeover entry point: dispatch to the level-specific
 * conversion helper, or return an ERR_PTR if the source level/layout is
 * not convertible to raid0.
 */
static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N -1) mirror drives faulty
	 */
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
		       mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

/* raid0 keeps no internally buffered writes, so quiesce is a no-op. */
static void raid0_quiesce(struct mddev *mddev, int state)
{
}

/* md personality descriptor: registers raid0 as md level 0 */
static struct md_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};

/* module init: register the personality with the md core */
static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

/* module exit: unregister the personality */
static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");