// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"

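/*
 * Check whether queuing @next after @prev would create a gap that
 * violates the queue's virt boundary mask.  When merging into a request,
 * @prev_rq supplies the request's first bio for the offset check.
 */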
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the first bio starts with a non-zero offset;
	 * otherwise it is quite difficult to respect the sg gap limit.
	 * We work hard to merge the large number of small single bios
	 * submitted by workloads such as mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

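/*
 * Split an oversized discard bio so that the submitted part obeys the
 * queue's discard limits and ends aligned to the discard granularity;
 * returns NULL if no split is needed.
 */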
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

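/*
 * Split a WRITE_ZEROES bio that exceeds the queue's
 * max_write_zeroes_sectors limit; returns NULL if no split is needed.
 */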
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

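/*
 * Split a WRITE_SAME bio that exceeds the queue's max_write_same_sectors
 * limit; returns NULL if no split is needed.
 */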
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}

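/*
 * Return how many bytes a segment starting at @offset inside @start_page
 * may contain without crossing the queue's segment boundary mask or
 * exceeding its maximum segment size.
 */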
static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Overflow may be triggered by a zero page physical address on
	 * 32-bit architectures; use the queue's max segment size when
	 * that happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:	  [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					 max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
 * the responsibility of the caller to ensure that
 * @bio->bi_disk->queue->bio_split is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
	struct request_queue *q = (*bio)->bi_disk->queue;
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		/*
		 * All drivers must accept single-segment bios that are <=
		 * PAGE_SIZE.  This is a quick and dirty check that relies on
		 * the fact that bi_io_vec[0] is always valid if a bio has data.
		 * The check might lead to occasional false negatives when bios
		 * are cloned, but compared to the performance impact of cloned
		 * bios themselves the loop below doesn't matter anyway.
		 */
		if (!q->limits.chunk_sectors &&
		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;
	}
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
 * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
 * after processing of the split bio has finished.
 */
void blk_queue_split(struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

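/*
 * Recount the physical segments of @rq by walking all of its bvecs.
 * Discard-style operations carry no payload segments, and WRITE_SAME
 * always counts as a single segment.
 */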
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

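/*
 * Map a single bvec to one or more scatterlist entries, respecting the
 * queue's segment size limit and never presenting a driver with an sg
 * offset larger than PAGE_SIZE.
 */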
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine.  For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

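/*
 * Map the data of all bios in a request to scatterlist entries, merging
 * physically contiguous bvecs from adjacent bios where possible.
 */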
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two different bios,
			 * since bvecs within a bio were already merged when
			 * pages were added to it.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * Map a request to a scatterlist, return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

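/* Discard requests are bounded by queue_max_discard_segments() instead. */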
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

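/*
 * Account for the segments @bio adds to @req, failing the merge when the
 * hardware segment limit or the integrity constraints would be violated.
 */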
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

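/*
 * Check whether @bio may be appended to the tail of @req, honouring gap,
 * integrity, crypto and maximum-size constraints.
 */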
int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

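/*
 * Check whether @bio may be prepended to the head of @req, under the same
 * constraints as a back merge.
 */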
int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

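/*
 * Try to combine two discard requests when the queue supports
 * multi-range discards; each bio stays a separate range.
 */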
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

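/*
 * Check whether @req and @next may be combined into a single request
 * without exceeding the queue's size, segment, integrity and crypto
 * constraints.
 */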
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

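/*
 * A request is being dissolved into another by a merge: account the
 * merge and drop the reference to the victim's partition.
 */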
static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();

		hd_struct_put(req->part);
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range
 * and sends them to the controller together. The ranges needn't be
 * contiguous.
 * Otherwise, the bios/requests are handled the same as the others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

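/*
 * Classify a potential request-request merge: a multi-range discard
 * merge, a back merge of physically adjacent requests, or no merge.
 */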
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(q, next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

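/* Try to merge @rq with the request sorted right after it by the elevator. */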
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

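/* Try to merge @rq with the request sorted right before it by the elevator. */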
struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

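/*
 * Try to merge @next into @rq and, on success, drop the reference to the
 * now-empty @next.  Returns 1 if the requests were merged, 0 otherwise.
 */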
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

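/*
 * Classify how @bio relates to @rq: a discard merge, a back merge when
 * the bio starts right after @rq ends, a front merge when it ends right
 * where @rq starts, or no merge at all.
 */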
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

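/*
 * Append @bio at the tail of @req if ll_back_merge_fn() allows it, and
 * update the request's size and failfast attributes accordingly.
 */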
enum bio_merge_status bio_attempt_back_merge(struct request *req,
					     struct bio *bio,
					     unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

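/*
 * Add @bio at the head of @req if ll_front_merge_fn() allows it, and
 * update the request's start sector and size accordingly.
 */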
enum bio_merge_status bio_attempt_front_merge(struct request *req,
					      struct bio *bio,
					      unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

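/*
 * Add @bio to a discard request as one more range if the discard
 * segment count and size limits allow it.
 */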
enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
						struct request *req,
						struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		if (rq->q == q && same_queue_rq) {
			/*
			 * Only the blk-mq multiple hardware queues case
			 * checks for an rq in the same queue; there should
			 * be only one such rq in a queue.
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q)
			continue;

		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
		    BIO_MERGE_OK)
			return true;
	}

	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);