// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer helper to convert BLK_ZONE_COND_XXX
 * into its string format. Useful for debugging and tracing zone conditions.
 * For an invalid BLK_ZONE_COND_XXX, the string "UNKNOWN" is returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

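/*
 * Return the first sector of the zone containing @sector. The zone size is
 * always a power of 2, so the zone start sector is obtained by masking off
 * the in-zone offset bits.
 */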
static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

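/*
 * Attempt to take the write lock of the zone targeted by @rq without
 * blocking. Return true and flag the request with RQF_ZONE_WRITE_LOCKED if
 * the lock was obtained, or false if the zone is already write locked.
 */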
bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

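/*
 * Take the write lock of the zone targeted by @rq. The zone is expected to
 * be unlocked: a WARN is issued if the zone write lock bit is already set or
 * if the request already carries RQF_ZONE_WRITE_LOCKED.
 */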
void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

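/*
 * Release the write lock of the zone targeted by @rq and clear
 * RQF_ZONE_WRITE_LOCKED. A WARN is issued if the zone was not locked.
 */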
void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);

/**
 * blkdev_nr_zones - Get number of zones
 * @disk:	Target gendisk
 *
 * Return the total number of zones of a zoned block device.  For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
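	/* The zone size is a power of 2: round up with a shift, not a division */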
	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at most
 *    @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
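/*
 * Example use of blkdev_report_zones() (an illustrative sketch only; the
 * callback and variable names below are hypothetical): count the sequential
 * zones of a zoned block device.
 *
 *	static int count_seq_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				      void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_zones_cb, &nr_seq);
 */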

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors
	 * of the applicable zone range is the entire disk.
	 */
	return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones. This is useful for applications like mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
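/*
 * Example use of blkdev_zone_mgmt() (an illustrative sketch only, assuming a
 * caller that already holds a reference on @bdev): reset every zone of the
 * device, as an mkfs-like tool would do before formatting.
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 0,
 *			       get_capacity(bdev->bd_disk), GFP_KERNEL);
 */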

struct zone_report_args {
	struct blk_zone __user *zones;
};

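/*
 * report_zones_cb used by BLKREPORTZONE: copy the zone descriptor at report
 * index @idx to the user buffer described by @data.
 */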
static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
				GFP_KERNEL);
}

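/*
 * Allocate a bitmap with one bit per zone on @node. GFP_NOIO is used since
 * this is called from the disk zone revalidation path.
 */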
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

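/*
 * Free the conventional zone bitmap and the sequential zone write lock
 * bitmap of @q and reset the pointers.
 */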
void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

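/*
 * Zone revalidation state shared between blk_revalidate_disk_zones() and its
 * report_zones_cb, blk_revalidate_zone_cb(). @sector tracks the expected
 * start sector of the next reported zone.
 */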
struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of an eventual
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

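	/* Remember the expected start sector of the next zone */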
	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers.  For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	memalloc_noio_restore(noio_flag);

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret >= 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
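/*
 * Example use of blk_revalidate_disk_zones() (an illustrative sketch only,
 * assuming a blk-mq driver context where no driver data needs updating after
 * the zones of @disk have changed):
 *
 *	ret = blk_revalidate_disk_zones(disk, NULL);
 */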