// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);
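
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * chaining pattern blk_next_bio() enables. Each call submits the previous
 * bio after chaining it to the new one, so a caller can build an arbitrarily
 * long sequence and wait on the final bio alone. issue_chain() and its
 * parameters are invented names for illustration.
 */
#if 0
static struct bio *issue_chain(struct block_device *bdev, sector_t sector,
			       unsigned int chunk_sects, int n, gfp_t gfp)
{
	struct bio *bio = NULL;
	int i;

	for (i = 0; i < n; i++) {
		/* Chains the previous bio (if any) and submits it. */
		bio = blk_next_bio(bio, 0, gfp);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_DISCARD;
		bio->bi_iter.bi_size = chunk_sects << 9;
		sector += chunk_sects;
	}
	return bio;	/* caller does submit_bio_wait() on the last bio */
}
#endif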

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
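
	/*
	 * Illustrative note (not in the original): with a 4096-byte logical
	 * block size, bs_mask == 7, so both the start sector and the sector
	 * count must be multiples of eight 512-byte sectors to pass the
	 * check above.
	 */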

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If no: set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio
		 *   starts at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split by the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
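		/*
		 * Illustrative worked example (not in the original): with a
		 * 4KiB discard_granularity (8 sectors) and sector_mapped == 13,
		 * round_up(13, 8) == 16, so the first bio covers 3 sectors and
		 * the next bio starts granularity-aligned at LBA 16.
		 */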
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
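
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * discard the first MiB of a device using the "plug, issue, wait" pattern
 * above. discard_first_mib() is an invented name.
 */
#if 0
static int discard_first_mib(struct block_device *bdev)
{
	/* 1 MiB == 2048 sectors of 512 bytes */
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
}
#endif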

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of REQ_OP_WRITE_SAME bios with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
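
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * replicate the first logical block of the zero page across 16 sectors via
 * the WRITE SAME offload. wipe_start_of_disk() is an invented name.
 */
#if 0
static int wipe_start_of_disk(struct block_device *bdev)
{
	return blkdev_issue_write_same(bdev, 0, 16, GFP_KERNEL, ZERO_PAGE(0));
}
#endif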

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 page for cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
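
/*
 * Illustrative worked example (not in the original): with 4KiB pages a page
 * holds 8 sectors, so nr_sects == 9 yields DIV_ROUND_UP(9, 8) == 2 pages,
 * nr_sects == 1 still yields 1 page, and very large ranges are clamped to
 * BIO_MAX_VECS.
 */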

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
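
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * zero the first 128KiB of a device, preferring the WRITE ZEROES offload
 * with fallback to writing zero pages. zero_header() is an invented name.
 */
#if 0
static int zero_header(struct block_device *bdev)
{
	/* 128 KiB == 256 sectors of 512 bytes */
	return blkdev_issue_zeroout(bdev, 0, 256, GFP_KERNEL, 0);
}
#endif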