/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

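/*
 * Split a discard bio that exceeds the queue's discard limits.  The split
 * point is capped at max_discard_sectors and rounded down so that the
 * remainder starts on a discard_granularity boundary.  Returns the
 * split-off front part, or NULL if no split is needed (or possible).
 */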
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

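/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit.  Returns the split-off front part, or NULL if no split is needed.
 */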
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs)
{
	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

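/*
 * Split a bio at the first point where it would exceed the queue's
 * max_sectors, max_segments or max_segment_size limits, or where an
 * SG gap would be created.  Returns the split-off front part, or NULL
 * if the whole bio fits.
 */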
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;

	bio_for_each_segment(bv, bio, iter) {
		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bv;
			sectors += bv.bv_len >> 9;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;
	}

	return NULL;
split:
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

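/**
 * blk_queue_split - split a bio that exceeds the queue limits
 * @q:   request queue the bio is being submitted to
 * @bio: in/out: bio to check; on return, the part to keep processing
 * @bs:  bio_set used for discard/write-same splits (the regular path
 *       uses q->bio_split)
 *
 * If *@bio is too big for @q it is split in two: the halves are chained
 * together, the tail half is resubmitted with generic_make_request(),
 * and *@bio is updated to point at the front half.
 */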
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split);

	if (split) {
		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

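/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * are physically contiguous and may be merged into a single segment
 * without exceeding the queue's segment size limit.
 */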
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

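/*
 * Add one bvec to the scatterlist: extend the current sg entry when
 * clustering is enabled and the size/boundary limits allow, otherwise
 * start a new entry.
 */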
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

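/*
 * Map every bvec of every bio in the request's bio chain into @sglist.
 * Discard and write-same bios are special-cased to at most one payload
 * segment.
 */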
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
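/*
 * A minimal usage sketch (the "sgl" and "nents" names below are
 * illustrative driver-side variables, not part of this API):
 *
 *	sg_init_table(sgl, rq->nr_phys_segments);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *
 * After this, sgl[0..nents-1] describes the request's data and can be
 * handed to the DMA mapping API.
 */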
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

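/*
 * Account the segments of @bio to @req if the queue's segment count and
 * integrity constraints allow it; otherwise mark the request REQ_NOMERGE.
 */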
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

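/* Check whether @bio can be merged to the back (tail) of @req. */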
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

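/* Check whether @bio can be merged to the front (head) of @req. */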
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{

	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

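/*
 * Check whether @req and @next may be merged into one request; if so,
 * update the physical segment count and the cached front/back segment
 * sizes used by blk_phys_contig_segment().
 */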
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

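/*
 * Drop the in-flight accounting for a request that is being merged into
 * another request and will therefore never be completed on its own.
 */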
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}