// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
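
/*
 * Example (illustrative sketch, not taken from a specific driver): a driver
 * that knows its backing media is non-rotational would typically flip the
 * corresponding queue flags right after setting up its queue:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 */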

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX for a REQ_OP_XXX value.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert a REQ_OP_XXX value
 * into its string representation. Useful when debugging or tracing a bio or
 * request. For an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
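
/*
 * Example (illustrative sketch): debugging and tracing code usually feeds
 * req_op() or bio_op() straight into blk_op_str() when logging, e.g.:
 *
 *	pr_debug("%s: op %s at sector %llu\n", __func__,
 *		 blk_op_str(req_op(rq)), (unsigned long long)blk_rq_pos(rq));
 */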

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
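
/*
 * Example (illustrative): the blk_errors[] table above is what makes the two
 * helpers round-trip for the known codes, e.g.:
 *
 *	errno_to_blk_status(-ENOMEM)          == BLK_STS_RESOURCE
 *	blk_status_to_errno(BLK_STS_RESOURCE) == -ENOMEM
 *
 * while an errno with no entry collapses to BLK_STS_IOERR.
 */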

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag to
	 * prevent blk_mq_run_hw_queues() from accessing the hardware queues
	 * after draining has finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *__blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(GFP_KERNEL, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id)
{
	struct request_queue *q;

	if (WARN_ON_ONCE(!make_request))
		return NULL;

	q = __blk_alloc_queue(node_id);
	if (!q)
		return NULL;
	q->make_request_fn = make_request;
	q->nr_requests = BLKDEV_MAX_RQ;
	return q;
}
EXPORT_SYMBOL(blk_alloc_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
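
/*
 * Example (illustrative sketch, assuming the caller already holds a blk-mq
 * based request_queue *q): a passthrough user typically allocates a request,
 * executes it synchronously (blk_execute_rq() lives in blk-exec.c) and then
 * releases it again:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->timeout = 30 * HZ;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */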

bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_start(req, false);
	return true;
}

bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q && same_queue_rq) {
			/*
			 * Only the blk-mq multiple hardware queues case checks
			 * for an rq on the same queue; there should be only one
			 * such rq per queue.
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "generic_make_request: Trying to write "
			"to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
	 * with BLK_STS_AGAIN status in order to catch -EAGAIN and
	 * to give the caller a chance to repeat the request gracefully.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
		status = BLK_STS_AGAIN;
		goto end_io;
	}

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (!blkcg_bio_issue_check(q, bio))
		return false;

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

/**
 * generic_make_request - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * of the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		struct request_queue *q = bio->bi_disk->queue;

		if (likely(bio_queue_enter(bio) == 0)) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			if (q->make_request_fn)
				ret = q->make_request_fn(q, bio);
			else
				ret = blk_mq_make_request(q, bio);

			blk_queue_exit(q);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	return ret;
}
EXPORT_SYMBOL(generic_make_request);

/**
 * direct_make_request - hand a buffer directly to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This function behaves like generic_make_request(), but does not protect
 * against recursion.  Must only be used if the called driver is known
 * to be blk-mq based.
 */
blk_qc_t direct_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	blk_qc_t ret;

	if (WARN_ON_ONCE(q->make_request_fn)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (!generic_make_request_checks(bio))
		return BLK_QC_T_NONE;
	if (unlikely(bio_queue_enter(bio)))
		return BLK_QC_T_NONE;
	ret = blk_mq_make_request(q, bio);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(direct_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_disk and bi_partno fields.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = generic_make_request(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
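
/*
 * Example (illustrative sketch, assuming the caller already has a
 * struct block_device *bdev and a struct page *page): synchronously reading
 * the first PAGE_SIZE bytes of a device might look like:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = 0;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	if (submit_bio_wait(bio))
 *		pr_err("read failed\n");
 *	bio_put(bio);
 *
 * Asynchronous callers set bio->bi_end_io and call submit_bio() directly.
 */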

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
1217 1218
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
1219
 */
1220 1221
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
1222
{
1223
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
1224 1225 1226
		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq),
			blk_queue_get_max_sectors(q, req_op(rq)));
1227 1228 1229 1230 1231 1232 1233 1234 1235
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
1236
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1237
	if (rq->nr_phys_segments > queue_max_segments(q)) {
1238 1239
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
		return -EIO;
	}

	return 0;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
1251
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1252
{
1253
	if (blk_cloned_rq_check_limits(q, rq))
1254
		return BLK_STS_IOERR;
1255

1256 1257
	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1258
		return BLK_STS_IOERR;
1259

J
		blk_account_io_start(rq, true);
1262 1263

	/*
J
	 * bypass a potential scheduler on the bottom device for
	 * insert.
1267
	 */
1268
	return blk_mq_request_issue_directly(rq, true);
1269 1270 1271
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290
/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

1291
	if (!(rq->rq_flags & RQF_MIXED_MERGE))
1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
J
1303
			break;
1304
		bytes += bio->bi_iter.bi_size;
1305 1306 1307 1308 1309 1310 1311 1312
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

1313
void blk_account_io_completion(struct request *req, unsigned int bytes)
1314
{
1315
	if (req->part && blk_do_io_stat(req)) {
1316
		const int sgrp = op_stat_group(req_op(req));
1317 1318
		struct hd_struct *part;

1319
		part_stat_lock();
1320
		part = req->part;
1321
		part_stat_add(part, sectors[sgrp], bytes >> 9);
1322 1323 1324 1325
		part_stat_unlock();
	}
}

1326
void blk_account_io_done(struct request *req, u64 now)
1327 1328
{
	/*
1329 1330 1331
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
1332
	 */
1333 1334
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
1335
		const int sgrp = op_stat_group(req_op(req));
1336 1337
		struct hd_struct *part;

1338
		part_stat_lock();
1339
		part = req->part;
1340

1341
		update_io_ticks(part, jiffies, true);
1342 1343
		part_stat_inc(part, ios[sgrp]);
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
1344
		part_dec_in_flight(req->q, part, rq_data_dir(req));
1345

1346
		hd_struct_put(part);
1347 1348 1349 1350
		part_stat_unlock();
	}
}

1351 1352 1353 1354 1355 1356 1357 1358
void blk_account_io_start(struct request *rq, bool new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);

	if (!blk_do_io_stat(rq))
		return;

1359
	part_stat_lock();
1360 1361 1362

	if (!new_io) {
		part = rq->part;
1363
		part_stat_inc(part, merges[rw]);
1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377
	} else {
		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
		if (!hd_struct_try_get(part)) {
			/*
			 * The partition is already being removed,
			 * the request will be accounted on the disk only
			 *
			 * We take a reference on disk->part0 although that
			 * partition will never be deleted, so we can treat
			 * it as any other partition.
			 */
			part = &rq->rq_disk->part0;
			hd_struct_get(part);
		}
1378
		part_inc_in_flight(rq->q, part, rw);
1379 1380 1381
		rq->part = part;
	}

1382
	update_io_ticks(part, jiffies, false);
1383

1384 1385 1386
	part_stat_unlock();
}

1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407
/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_mq_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *	blk_rq_bytes() and in blk_update_request().
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
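
/*
 * Example (illustrative sketch, modelled loosely on how the md/raid drivers
 * use this helper; struct my_plug_cb and my_unplug() are hypothetical names):
 * the caller embeds a struct blk_plug_cb in its own bookkeeping structure and
 * lets blk_check_plugged() allocate it on first use:
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb cb;
 *		struct bio_list pending;
 *	};
 *
 *	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
 *	if (cb)
 *		bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending, bio);
 *	else
 *		submit_bio(bio);	// no plug active, issue directly
 *
 * my_unplug() is then invoked, with from_schedule set appropriately, when the
 * plug is flushed.
 */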

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
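
/*
 * Example (illustrative sketch; next_bio_to_submit() is a hypothetical
 * helper): a submitter batching several bios brackets the submissions with a
 * plug so that the block layer can merge and dispatch them together:
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio_to_submit()) != NULL)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */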

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

#ifdef CONFIG_DEBUG_FS
	blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif

	return 0;
}