// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer function to convert a BLK_ZONE_COND_XXX
 * value into its string representation. Useful for debugging and tracing zone
 * conditions. For an invalid BLK_ZONE_COND_XXX value, the string "UNKNOWN" is
 * returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
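
/*
 * Example (illustrative sketch only, not code from this file): a report_zones
 * callback in a driver or file system could use blk_zone_cond_str() for debug
 * output; the callback name here is hypothetical.
 *
 *	static int zone_debug_cb(struct blk_zone *zone, unsigned int idx,
 *				 void *data)
 *	{
 *		pr_debug("zone %u: start %llu cond %s\n", idx, zone->start,
 *			 blk_zone_cond_str(zone->cond));
 *		return 0;
 *	}
 */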

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
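
/*
 * Illustrative sketch (an assumption about typical usage, not code from this
 * file): a zone-aware dispatch path, e.g. an I/O scheduler, generally pairs
 * these helpers as follows before issuing a write to a sequential zone:
 *
 *	if (blk_req_needs_zone_write_lock(rq) &&
 *	    !blk_req_zone_write_trylock(rq))
 *		return NULL;
 *
 * (the zone is already write-locked, so another request should be picked),
 * and the zone is unlocked on completion with blk_req_zone_write_unlock().
 */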

/**
 * blkdev_nr_zones - Get number of zones
 * @disk:	Target gendisk
 *
 * Return the total number of zones of a zoned block device.  For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
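
/*
 * Example (illustrative sketch with hypothetical names): callers typically
 * use blkdev_nr_zones() to size per-zone tracking structures:
 *
 *	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
 *	unsigned long *zone_bitmap = NULL;
 *
 *	if (nr_zones)
 *		zone_bitmap = bitmap_zalloc(nr_zones, GFP_KERNEL);
 */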

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at most
 *    @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
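
/*
 * Example (illustrative sketch; the callback and variables are hypothetical):
 * counting the sequential zones of a zoned block device by passing the
 * BLK_ALL_ZONES constant to blkdev_report_zones():
 *
 *	static int count_seq_cb(struct blk_zone *zone, unsigned int idx,
 *				void *data)
 *	{
 *		if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
 *			(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_cb, &nr_seq);
 */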

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the range of zones to
	 * operate on covers the entire disk, i.e. all of its sectors.
	 */
	return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (allow for a possibly smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones: this is useful for applications such as mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
			break;
		}

		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
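
/*
 * Example (illustrative sketch, assuming @bdev is a zoned block device and
 * @sector is a valid sector): resetting the single zone containing @sector.
 *
 *	sector_t zone_sectors = blk_queue_zone_sectors(bdev_get_queue(bdev));
 *	sector_t zone_start = sector & ~(zone_sectors - 1);
 *	int ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
 *				   zone_sectors, GFP_KERNEL);
 */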

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			return ret;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

	/*
	 * Invalidate the page cache again for zone reset: writes can only be
	 * direct for zoned devices so concurrent writes would not add any page
	 * to the page cache after/during reset. The page cache may be filled
	 * again due to concurrent reads though and dropping the pages for
	 * these is fine.
	 */
	if (!ret && cmd == BLKRESETZONE)
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);

	return ret;
}

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the possible exception of a
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers.  For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
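
/*
 * Example (illustrative sketch of a hypothetical blk-mq based zoned driver;
 * an assumption about typical usage, not code from this file): the driver
 * would call blk_revalidate_disk_zones() from its revalidate path once the
 * zone model and capacity are known:
 *
 *	blk_queue_set_zoned(disk, BLK_ZONED_HM);
 *	ret = blk_revalidate_disk_zones(disk, NULL);
 *	if (ret)
 *		pr_warn("%s: zone revalidation failed %d\n",
 *			disk->disk_name, ret);
 */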

void blk_queue_clear_zone_settings(struct request_queue *q)
{
	blk_mq_freeze_queue(q);

	blk_queue_free_zone_bitmaps(q);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	q->nr_zones = 0;
	q->max_open_zones = 0;
	q->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}