/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

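/*
 * Split a discard bio that exceeds the queue's discard limits so that the
 * remainder starts on a discard-granularity-aligned sector.
 */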
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);
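	/*
	 * sector_div() divides in place and returns the remainder, so tmp now
	 * holds how far the proposed end sector overshoots the previous
	 * aligned boundary; trimming that off keeps the remainder bio
	 * starting on a discard-aligned sector.
	 */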

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs)
{
	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	struct bio *split;
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;

	bio_for_each_segment(bv, bio, iter) {
		sectors += bv.bv_len >> 9;

		if (sectors > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bv;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bv;
		seg_size = bv.bv_len;
	}

	return NULL;
split:
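	/*
	 * iter still points at the first bvec that was rejected, so
	 * iter.bi_size is exactly what must stay behind: shrink the clone to
	 * the accepted prefix and advance the original bio past it.
	 */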
	split = bio_clone_bioset(bio, GFP_NOIO, bs);

	split->bi_iter.bi_size -= iter.bi_size;
	bio->bi_iter = iter;

	if (bio_integrity(bio)) {
		bio_integrity_advance(bio, split->bi_iter.bi_size);
		bio_integrity_trim(split, 0, bio_sectors(split));
	}

	return split;
}

void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split);

	if (split) {
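		/*
		 * The split is the front piece and *bio now holds the
		 * remainder: bio_chain() makes the remainder's completion
		 * wait for the split, the remainder is resubmitted here, and
		 * the front piece is handed back to the caller.
		 */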
		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

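	/*
	 * Record how big the first and last physical segments are:
	 * blk_phys_contig_segment() uses bi_seg_front_size/bi_seg_back_size
	 * to decide whether the boundary segments of two adjacent bios can
	 * be folded into one without exceeding the max segment size.
	 */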
	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

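/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * could be merged into a single physical segment: the queue must cluster,
 * the combined boundary segment must fit the max segment size, and the two
 * bvecs must be physically contiguous and within the segment boundary mask.
 */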
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

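/*
 * Add one bvec to the scatterlist being built: extend the current sg entry
 * when the queue clusters and the bvec is physically contiguous and within
 * the segment limits, otherwise start a new sg entry.
 */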
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

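/*
 * Map the bvecs of a bio chain onto @sglist.  Discard and write-same bios
 * are special-cased: they map to at most a single sg entry (and a
 * payload-less discard maps to none at all).
 */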
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment count.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

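/*
 * Check whether @bio may be appended to the back of @req: the combined size
 * must not exceed the request's sector limit, and ll_new_hw_segment()
 * verifies the segment and integrity limits.
 */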
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int req_gap_to_prev(struct request *req, struct bio *next)
{
	struct bio *prev = req->biotail;

	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
			next->bi_io_vec[0].bv_offset);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests is a re-queued
	 * request.  Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_to_prev(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/* Only check gaps if the bio carries data */
	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
		return false;

	return true;
}

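/*
 * Classify the merge a bio is a candidate for: a back merge if it starts
 * right where the request ends, a front merge if it ends right where the
 * request starts, otherwise no merge at all.
 */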
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}