// SPDX-License-Identifier: GPL-2.0-or-later
/*
   raid0.c : Multiple Devices driver for Linux
	     Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
	     Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

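/*
 * Layout to assume for a multi-zone array whose superblock does not
 * record one: 1 selects RAID0_ORIG_LAYOUT, 2 selects
 * RAID0_ALT_MULTIZONE_LAYOUT (see the layout selection in
 * create_strip_zones()).  Settable at module load time or via
 * /sys/module/raid0/parameters/default_layout.
 */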
static int default_layout = 0;
module_param(default_layout, int, 0644);

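/*
 * md feature flags that raid0 cannot honour; they are cleared from a
 * source array's flags when it is taken over to raid0.
 */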
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))

static int raid0_congested(struct mddev *mddev, int bits)
{
	struct r0conf *conf = mddev->private;
	struct md_rdev **devlist = conf->devlist;
	int raid_disks = conf->strip_zone[0].nb_dev;
	int i, ret = 0;

	for (i = 0; i < raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(q->backing_dev_info, bits);
	}
	return ret;
}

/*
 * Inform the user of the RAID configuration.
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	char b[BDEVNAME_SIZE];
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;

	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
					bdevname(conf->devlist[j*raid_disks
							       + k]->bdev, b));
		pr_debug("md: zone%d=[%s]\n", j, line);

		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}

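/*
 * Members of unequal size are organised into "strip zones": zone 0
 * stripes across every device up to the size of the smallest one, the
 * next zone stripes across whatever devices extend beyond that, and so
 * on.  As an illustrative example, two 1 TB disks plus one 2 TB disk
 * yield zone 0 striped over all three disks and zone 1 living entirely
 * on the tail of the 2 TB disk.
 */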
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];
	char b2[BDEVNAME_SIZE];
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %s\n",
			 mdname(mddev),
			 bdevname(rdev1->bdev, b));
		c = 0;

		/* round size to chunk_size */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s:   comparing %s(%llu)"
				 " with %s(%llu)\n",
				 mdname(mddev),
				 bdevname(rdev1->bdev,b),
				 (unsigned long long)rdev1->sectors,
				 bdevname(rdev2->bdev,b2),
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s:   END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				pr_debug("md/raid0:%s:   EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s:   NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s:   ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);
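
	/*
	 * With a single zone both layouts place data identically, so no
	 * choice is needed.  With multiple zones the two historical
	 * layouts place data differently, so the layout must come from
	 * the superblock or from the default_layout module parameter.
	 */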
	if (conf->nr_strip_zones == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -ENOTSUPP;
		goto abort;
	}
	/*
	 * now since we have the hard sector sizes, we can make sure
	 * chunk size is a multiple of that sector size
	 */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over a raid10-n2 array */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over a raid1 array -
			 * we have only one active disk
			 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %s ... nope\n",
					 mdname(mddev),
					 bdevname(rdev->bdev, b));
				continue;
			}
			pr_debug("md/raid0:%s: checking %s ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 bdevname(rdev->bdev, b), c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
				    sector_t *sectorp)
{
	int i;
	struct strip_zone *z = conf->strip_zone;
	sector_t sector = *sectorp;

	for (i = 0; i < conf->nr_strip_zones; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i-1].zone_end;
			return z + i;
		}
	BUG();
}

/*
 * Remap the bio to the target device.  For performance we separate two
 * flows: a fast path for power-of-2 chunk sizes and a general path for
 * everything else.
 */
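/*
 * Worked example (illustrative, assuming a single zone starting at
 * sector 0, 128-sector chunks, 3 devices): array sector 700 lies in
 * chunk 700 / 128 = 5, which maps to device 5 % 3 = 2 as that device's
 * chunk 700 / (3 * 128) = 1; the final device sector is
 * 1 * 128 + (700 % 128) = 188, plus zone->dev_start.
 */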
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);
		/* find the sector offset inside the chunk */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;
		/* chunk in zone */
		chunk = *sector_offset;
		/* quotient is the chunk in real device*/
		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else {
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}
	/*
	 * position the bio over the real device
	 * real sector = chunk in device + starting of zone
	 *	+ the position in the chunk
	 */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}

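/*
 * The array size is simply the sum of the members' sizes, each rounded
 * down to a whole number of chunks; raid0 has no redundancy.
 */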
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);

static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			return ret;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;
		bool discard_supported = false;

		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
				discard_supported = true;
		}
		if (!discard_supported)
			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
		else
			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	if (mddev->queue) {
		/* calculate the max read-ahead size.
		 * For read-ahead of large files to be effective, we need to
		 * readahead at least twice a whole stripe. i.e. number of devices
		 * multiplied by chunk size times 2.
		 * If an individual device has an ra_pages greater than the
		 * chunk size, then we will not drive that device as hard as it
		 * wants.  We consider this a configuration error: a larger
		 * chunksize should be used in that case.
		 */
		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
	}

	dump_zones(mddev);

	ret = md_integrity_register(mddev);

	return ret;
}

static void raid0_free(struct mddev *mddev, void *priv)
{
	struct r0conf *conf = priv;

	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
}

/*
 * Does this IO fit entirely within one chunk, or does it cross a chunk
 * boundary?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
			unsigned int chunk_sects, struct bio *bio)
{
	if (likely(is_power_of_2(chunk_sects))) {
		return chunk_sects >=
			((bio->bi_iter.bi_sector & (chunk_sects-1))
					+ bio_sectors(bio));
	} else {
		sector_t sector = bio->bi_iter.bi_sector;
		return chunk_sects >= (sector_div(sector, chunk_sects)
						+ bio_sectors(bio));
	}
}

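/*
 * Handle a DISCARD that may span many chunks and devices: clip the bio
 * to the zone that holds its start (re-submitting the remainder), then
 * issue at most one discard per member device covering that device's
 * share of the range, each chained to the parent bio so completion is
 * reported once.
 */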
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	zone = find_zone(conf, &start);

	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Now start and end is the offset in zone */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct bio *discard_bio = NULL;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		if (__blkdev_issue_discard(rdev->bdev,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start, GFP_NOIO, 0, &discard_bio) ||
		    !discard_bio)
			continue;
		bio_chain(discard_bio, bio);
		bio_clone_blkg_association(discard_bio, bio);
		if (mddev->gendisk)
			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
				discard_bio, disk_devt(mddev->gendisk),
				bio->bi_iter.bi_sector);
		generic_make_request(discard_bio);
	}
	bio_endio(bio);
}

static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	struct md_rdev *tmp_dev;
	sector_t bio_sector;
	sector_t sector;
	sector_t orig_sector;
	unsigned chunk_sects;
	unsigned sectors;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
		raid0_handle_discard(mddev, bio);
		return true;
	}

	bio_sector = bio->bi_iter.bi_sector;
	sector = bio_sector;
	chunk_sects = mddev->chunk_sectors;

	sectors = chunk_sects -
		(likely(is_power_of_2(chunk_sects))
		 ? (sector & (chunk_sects-1))
		 : sector_div(sector, chunk_sects));

	/* Restore due to sector_div */
	sector = bio_sector;

	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO,
					      &mddev->bio_set);
		bio_chain(split, bio);
		generic_make_request(bio);
		bio = split;
	}

	orig_sector = sector;
	zone = find_zone(mddev->private, &sector);
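	/*
	 * Multi-zone arrays historically exist in two incompatible
	 * variants: RAID0_ORIG_LAYOUT computes the target device from
	 * the chunk number within the whole array, while
	 * RAID0_ALT_MULTIZONE_LAYOUT computes it from the chunk number
	 * within the zone.  Use whichever layout this array was
	 * assembled with.
	 */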
	switch (conf->layout) {
	case RAID0_ORIG_LAYOUT:
		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
		break;
	case RAID0_ALT_MULTIZONE_LAYOUT:
		tmp_dev = map_sector(mddev, zone, sector, &sector);
		break;
	default:
		WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
		bio_io_error(bio);
		return true;
	}

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;

	if (mddev->gendisk)
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				disk_devt(mddev->gendisk), bio_sector);
	mddev_check_writesame(mddev, bio);
	mddev_check_write_zeroes(mddev, bio);
	generic_make_request(bio);
	return true;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
	seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
	return;
}

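/*
 * raid4, and raid5 in the PARITY_N layout, stripe data exactly like
 * raid0; once the single parity disk is missing, what remains is
 * already a valid raid0 data layout, so takeover only has to rewrite
 * the metadata.
 */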
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}

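/*
 * In the raid10 layout word the low byte is near_copies and the next
 * byte is far_copies, so ((1 << 8) + 2) means far_copies == 1 and
 * near_copies == 2: a plain n2 mirror.  With one leg of every mirror
 * failed, the surviving disks form a raid0.
 */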
static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

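/*
 * Illustrative example of the chunk-size search below: for an array of
 * 1000 sectors, 128 (64K), 64, 32 and 16 sectors all fail the multiple
 * test, and chunksect settles at 8 sectors (4K); anything smaller than
 * PAGE_SIZE is then rejected.
 */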
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * figure out the largest suitable size we can use.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
	/* raid0 can take over:
	 *  raid4 - if all data disks are active.
	 *  raid5 - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1 - with (N - 1) mirror drives faulty
	 */

	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}
	if (mddev->level == 4)
		return raid0_takeover_raid45(mddev);

	if (mddev->level == 5) {
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);

		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
	}

	if (mddev->level == 10)
		return raid0_takeover_raid10(mddev);

	if (mddev->level == 1)
		return raid0_takeover_raid1(mddev);

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}

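/* raid0 holds no internal state that needs draining, so this is a no-op. */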
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}

static struct md_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
	.congested	= raid0_congested,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");