// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

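/*
 * Example (illustrative sketch, not part of this file): a driver could use
 * the flag helpers above to mark its queue non-rotational and to take a
 * one-time action; "my_queue_setup" is a made-up name.
 *
 *	static void my_queue_setup(struct request_queue *q)
 *	{
 *		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *		if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STATS, q))
 *			pr_info("stats enabled for the first time\n");
 *	}
 */
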
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string name of a REQ_OP_XXX value.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert a REQ_OP_XXX value
 * into string format. Useful when debugging and tracing a bio or request. For
 * an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

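/*
 * Example (sketch, not part of this file): blk_op_str() is handy in driver
 * debug output; "my_rq_debug" is a made-up helper.
 *
 *	static void my_rq_debug(struct request *rq)
 *	{
 *		pr_debug("op=%s sector=%llu\n", blk_op_str(req_op(rq)),
 *			 (unsigned long long)blk_rq_pos(rq));
 *	}
 */
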
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

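/*
 * Example (sketch): round-tripping between the two error domains.  Note
 * that the mapping is lossy by design: every errno without an entry in
 * blk_errors[] collapses into BLK_STS_IOERR.
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);
 *	int err = blk_status_to_errno(sts);	// back to -ENOSPC
 */
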
static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

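/*
 * Example (sketch): a suspend/resume pairing of the pm_only counter; while
 * it is elevated, only BLK_MQ_REQ_PREEMPT submitters pass blk_queue_enter()
 * (see below).  "my_suspend" and "my_resume" are made-up names.
 *
 *	static void my_suspend(struct request_queue *q)
 *	{
 *		blk_set_pm_only(q);	// non-PM submitters now wait
 *	}
 *
 *	static void my_resume(struct request_queue *q)
 *	{
 *		blk_clear_pm_only(q);	// wakes q->mq_freeze_wq waiters
 *	}
 */
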
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When the queue DYING flag is set, we need to block new requests
	 * from entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag
	 * to prevent blk_mq_run_hw_queues() from accessing the hardware
	 * queues after draining has finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

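/*
 * Example (sketch): a successful blk_queue_enter() must be paired with
 * blk_queue_exit(), exactly as bio_queue_enter()/do_make_request() do
 * below.  "my_poke_queue" is a made-up name.
 *
 *	static int my_poke_queue(struct request_queue *q)
 *	{
 *		int ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
 *
 *		if (ret)
 *			return ret;	// -EBUSY or -ENODEV
 *		// ... the queue is pinned; safe to use it here ...
 *		blk_queue_exit(q);
 *		return 0;
 *	}
 */
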
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *__blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id)
{
	struct request_queue *q;

	if (WARN_ON_ONCE(!make_request))
		return NULL;

	q = __blk_alloc_queue(node_id);
	if (!q)
		return NULL;
	q->make_request_fn = make_request;
	q->nr_requests = BLKDEV_MAX_RQ;
	return q;
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

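/*
 * Example (sketch): pin a queue before using it beyond the caller's own
 * reference, then drop it again; the final put must not happen in atomic
 * context (see blk_put_queue() above).
 *
 *	if (blk_get_queue(q)) {
 *		// ... use q ...
 *		blk_put_queue(q);
 *	}
 */
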
/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);
L
Linus Torvalds 已提交
652 653 654

void blk_put_request(struct request *req)
{
J
Jens Axboe 已提交
655
	blk_mq_free_request(req);
L
Linus Torvalds 已提交
656 657 658
}
EXPORT_SYMBOL(blk_put_request);

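/*
 * Example (sketch): allocating and freeing a driver-private request.
 * blk_get_request() never returns NULL, so errors are checked with
 * IS_ERR().
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... set up and execute rq ...
 *	blk_put_request(rq);
 */
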
static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return true;
}

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return true;
}

bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q && same_queue_rq) {
			/*
			 * Only the blk-mq multiple hardware queues case checks
			 * the rq in the same queue; there should be only one
			 * such rq in a queue.
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

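/*
 * Example (sketch): issuing a zone append.  The bio must target the start
 * of a sequential zone; on completion bi_sector carries the sector that
 * was actually written (see req_bio_endio() above).  "my_end_io" and
 * "zone_start_sector" are made-up names.
 *
 *	bio->bi_opf = REQ_OP_ZONE_APPEND;
 *	bio->bi_iter.bi_sector = zone_start_sector;
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */
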
static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue is not a request based queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t do_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!q->make_request_fn)
			return blk_mq_make_request(q, bio);
		ret = q->make_request_fn(q, bio);
	}
	blk_queue_exit(q);
	return ret;
}

/**
 * generic_make_request - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * of the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		struct request_queue *q = bio->bi_disk->queue;

		if (likely(bio_queue_enter(bio) == 0)) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			ret = do_make_request(bio);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	return ret;
}
EXPORT_SYMBOL(generic_make_request);

/**
 * direct_make_request - hand a buffer directly to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This function behaves like generic_make_request(), but does not protect
 * against recursion.  Must only be used if the called driver is known
 * to be blk-mq based.
 */
blk_qc_t direct_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;

	if (WARN_ON_ONCE(q->make_request_fn)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (!generic_make_request_checks(bio))
		return BLK_QC_T_NONE;
	if (unlikely(bio_queue_enter(bio)))
		return BLK_QC_T_NONE;
	if (!blk_crypto_bio_prep(&bio)) {
		blk_queue_exit(q);
		return BLK_QC_T_NONE;
	}
	return blk_mq_make_request(q, bio);
}
EXPORT_SYMBOL_GPL(direct_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_disk and bi_partno fields.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = generic_make_request(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

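/*
 * Example (sketch): a minimal synchronous one-page read through the
 * submission path above; submit_bio_wait() supplies the ->bi_end_io
 * completion internally.  "bdev" and "page" are assumed valid.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = 0;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	if (submit_bio_wait(bio))
 *		pr_err("read failed\n");
 *	bio_put(bio);
 */
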
/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq),
			blk_queue_get_max_sectors(q, req_op(rq)));
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return -EIO;
	}

	return 0;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;
		part_stat_add(part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		update_io_ticks(part, jiffies, true);
		part_stat_inc(part, ios[sgrp]);
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();

		hd_struct_put(part);
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
		unsigned int op)
{
	struct hd_struct *part = &disk->part0;
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}
EXPORT_SYMBOL(disk_start_io_acct);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		unsigned long start_time)
{
	struct hd_struct *part = &disk->part0;
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(disk_end_io_acct);

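/*
 * Example (sketch): a bio-based driver bracketing each bio with the two
 * accounting helpers above.  "my_dev" and "my_make_request" are made-up
 * names.
 *
 *	static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		unsigned long start;
 *
 *		start = disk_start_io_acct(my_dev->disk, bio_sectors(bio),
 *					   bio_op(bio));
 *		// ... perform the I/O ...
 *		disk_end_io_acct(my_dev->disk, bio_op(bio), start);
 *		bio_endio(bio);
 *		return BLK_QC_T_NONE;
 *	}
 */
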
/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_mq_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *	blk_rq_bytes() and in blk_update_request().
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point to the same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio)
		blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask);

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

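/*
 * Example (sketch): a stacking driver (md does something similar) batching
 * work until the plug is flushed.  "my_unplug" is a made-up callback with
 * the blk_plug_cb_fn signature; the callback owns and frees the cb.
 *
 *	static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		// ... issue everything batched under this plug ...
 *		kfree(cb);
 *	}
 *
 *	// in the submission path:
 *	if (!blk_check_plugged(my_unplug, my_data, sizeof(struct blk_plug_cb)))
 *		; // no plug active: issue immediately
 */
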
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

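/*
 * Example (sketch): batching several submissions under one plug;
 * "bios" and "nr" are assumed to come from the caller.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);	// flushes the batched I/O
 */
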
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}