// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

bool precise_iostat;

static int __init precise_iostat_setup(char *str)
{
	bool precise;

	if (!strtobool(str, &precise)) {
		precise_iostat = precise;
		pr_info("precise iostat %d\n", precise_iostat);
	}

	return 1;
}
__setup("precise_iostat=", precise_iostat_setup);
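
/*
 * Illustrative sketch, not part of this file: precise_iostat is wired up as
 * a boot parameter above, so booting with "precise_iostat=1" on the kernel
 * command line switches io_ticks accounting to the in_flight based variant
 * used in update_io_ticks() below.
 */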

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
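
/*
 * Illustrative sketch, not part of this file: a driver setting up its queue
 * might use these helpers roughly as follows:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
 *		pr_debug("NOMERGES was previously clear\n");
 *	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
 */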

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing a bio or request. For
 * an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
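
/*
 * Illustrative sketch, not part of this file: blk_op_str() is meant for
 * debug and trace output, e.g.:
 *
 *	pr_debug("%s request, sector %llu\n", blk_op_str(req_op(rq)),
 *		 (unsigned long long)blk_rq_pos(rq));
 */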

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
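
/*
 * Illustrative sketch, not part of this file: the two helpers above are
 * inverses for the common cases, e.g.:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);
 *	int err = blk_status_to_errno(sts);	(err is -ENOSPC again)
 */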

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait_sync(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
432
		blk_mq_sched_free_rqs(q);
433 434
	mutex_unlock(&q->sysfs_lock);

435
	percpu_ref_exit(&q->q_usage_counter);
B
Bart Van Assche 已提交
436

437
	/* @q is and will stay empty, shutdown and put */
438 439
	blk_put_queue(q);
}
L
Linus Torvalds 已提交
440 441
EXPORT_SYMBOL(blk_cleanup_queue);

442 443 444
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
445
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
446
 */
447
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
448
{
449
	const bool pm = flags & BLK_MQ_REQ_PM;
450

451
	while (true) {
452
		bool success = false;
453

454
		rcu_read_lock();
455 456
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
457 458 459
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
460
			 */
461 462
			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
			    !blk_queue_pm_only(q)) {
463 464 465 466 467
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
468
		rcu_read_unlock();
469 470

		if (success)
471 472
			return 0;

473
		if (flags & BLK_MQ_REQ_NOWAIT)
474 475
			return -EBUSY;

476
		/*
477
		 * read pair of barrier in blk_freeze_queue_start(),
478
		 * we need to order reading __PERCPU_REF_DEAD flag of
479 480 481
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
482 483 484
		 */
		smp_rmb();

485
		wait_event(q->mq_freeze_wq,
486
			   (!q->mq_freeze_depth &&
487
			    blk_pm_resume_queue(pm, q)) ||
488
			   blk_queue_dying(q));
489 490 491 492 493
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
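
/*
 * Illustrative sketch, not part of this file: callers that take a temporary
 * queue reference pair the two helpers, e.g.:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;
 *	... issue work against q ...
 *	blk_queue_exit(q);
 */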

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	blk_queue_flag_set(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
	wake_up_all(&q->mq_freeze_wq);
}

525
static void blk_rq_timed_out_timer(struct timer_list *t)
526
{
527
	struct request_queue *q = from_timer(q, t, timeout);
528 529 530 531

	kblockd_schedule_work(&q->timeout_work);
}

532 533 534 535
static void blk_timeout_work(struct work_struct *work)
{
}

536
struct request_queue *blk_alloc_queue(int node_id)
537
{
538
	struct request_queue *q;
539
	int ret;
540

541
	q = kmem_cache_alloc_node(blk_requestq_cachep,
542
				GFP_KERNEL | __GFP_ZERO, node_id);
543
	if (!q)
L
Linus Torvalds 已提交
544 545
		return NULL;

546 547
	q->last_merge = NULL;

548
	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
549
	if (q->id < 0)
550
		goto fail_q;
551

552 553
	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
554 555
		goto fail_id;

C
Christoph Hellwig 已提交
556
	q->backing_dev_info = bdi_alloc(node_id);
557 558 559
	if (!q->backing_dev_info)
		goto fail_split;

560 561 562 563
	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

564
	q->node = node_id;
565

566 567
	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

568 569 570
	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
571
	INIT_WORK(&q->timeout_work, blk_timeout_work);
572
	INIT_LIST_HEAD(&q->icq_list);
573
#ifdef CONFIG_BLK_CGROUP
574
	INIT_LIST_HEAD(&q->blkg_list);
575
#endif
576

577
	kobject_init(&q->kobj, &blk_queue_ktype);
L
Linus Torvalds 已提交
578

579
	mutex_init(&q->debugfs_mutex);
580
	mutex_init(&q->sysfs_lock);
581
	mutex_init(&q->sysfs_dir_lock);
582
	spin_lock_init(&q->queue_lock);
583

584
	init_waitqueue_head(&q->mq_freeze_wq);
585
	mutex_init(&q->mq_freeze_lock);
586

587 588 589 590 591 592 593
	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
594
		goto fail_bdi;
595

596 597 598
	if (blkcg_init_queue(q))
		goto fail_ref;

599 600
	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
601
	q->nr_requests = BLKDEV_MAX_RQ;
602

L
Linus Torvalds 已提交
603
	return q;
604

605 606
fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
607
fail_bdi:
608 609
	blk_free_queue_stats(q->stats);
fail_stats:
610
	bdi_put(q->backing_dev_info);
611
fail_split:
612
	bioset_exit(&q->bio_split);
613 614 615
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
616
	kmem_cache_free(blk_requestq_cachep, q);
617
	return NULL;
L
Linus Torvalds 已提交
618
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
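
/*
 * Illustrative sketch, not part of this file: a passthrough style user might
 * allocate and release a request like this:
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and execute rq ...
 *	blk_put_request(rq);
 */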

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
L
Linus Torvalds 已提交
669 670 671
{
	char b[BDEVNAME_SIZE];

672 673 674 675
	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
L
Linus Torvalds 已提交
676 677
}

678 679 680 681 682 683 684 685 686 687
#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

688
static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
689
{
690
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
691 692 693 694
}

static int __init fail_make_request_debugfs(void)
{
695 696 697
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

698
	return PTR_ERR_OR_ZERO(dir);
699 700 701 702 703 704
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

705 706
static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
707
{
708
	return false;
709 710 711 712
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

713 714
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
715 716
	const int op = bio_op(bio);

717
	if (part->read_only && op_is_write(op)) {
718 719
		char b[BDEVNAME_SIZE];

720 721 722
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

723
		WARN_ONCE(1,
724
		       "Trying to write to read-only block-device %s (partno %d)\n",
725
			bio_devname(bio, b), part->partno);
726 727
		/* Older lvm-tools actually trigger this */
		return false;
728 729 730 731 732
	}

	return false;
}

733 734 735 736 737 738 739 740
static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758
/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

759 760 761 762 763 764
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
765
	int ret = -EIO;
766

767 768
	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
769 770 771 772 773
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
774 775
		goto out;

776
	if (bio_sectors(bio)) {
777 778 779 780 781 782
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
783
	bio->bi_partno = 0;
784
	ret = 0;
785 786
out:
	rcu_read_unlock();
787 788 789
	return ret;
}

790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824
/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

825
static noinline_for_stack bool submit_bio_checks(struct bio *bio)
L
Linus Torvalds 已提交
826
{
827
	struct request_queue *q = bio->bi_disk->queue;
828
	blk_status_t status = BLK_STS_IOERR;
829
	struct blk_plug *plug;
L
Linus Torvalds 已提交
830 831 832

	might_sleep();

833 834 835 836
	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

837
	/*
838
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
M
Mike Snitzer 已提交
839
	 * if queue does not support NOWAIT.
840
	 */
M
Mike Snitzer 已提交
841
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
842
		goto not_supported;
843

844
	if (should_fail_bio(bio))
845
		goto end_io;
846

847 848
	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
849 850
			goto end_io;
	} else {
851 852 853
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
854 855
			goto end_io;
	}
856

857
	/*
858 859
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
860
	 */
861
	if (op_is_flush(bio->bi_opf) &&
J
Jens Axboe 已提交
862
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
J
Jens Axboe 已提交
863
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
864
		if (!bio_sectors(bio)) {
865
			status = BLK_STS_OK;
866 867
			goto end_io;
		}
868
	}
869

870 871 872
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

873 874 875 876 877 878 879 880 881 882
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
883
		if (!q->limits.max_write_same_sectors)
884
			goto not_supported;
885
		break;
886 887 888 889 890
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
891
	case REQ_OP_ZONE_RESET:
892 893 894
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
895
		if (!blk_queue_is_zoned(q))
896
			goto not_supported;
897
		break;
898 899 900 901
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
902
	case REQ_OP_WRITE_ZEROES:
903
		if (!q->limits.max_write_zeroes_sectors)
904 905
			goto not_supported;
		break;
906 907
	default:
		break;
908
	}
909

T
Tejun Heo 已提交
910
	/*
911 912 913 914
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
T
Tejun Heo 已提交
915
	 */
916 917
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);
T
Tejun Heo 已提交
918

919
	if (blk_throtl_bio(bio))
920
		return false;
921 922 923

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);
924

N
NeilBrown 已提交
925 926 927 928 929 930 931
	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
932
	return true;
933

934
not_supported:
935
	status = BLK_STS_NOTSUPP;
936
end_io:
937
	bio->bi_status = status;
938
	bio_endio(bio);
939
	return false;
L
Linus Torvalds 已提交
940 941
}

942
static blk_qc_t __submit_bio(struct bio *bio)
943
{
944
	struct gendisk *disk = bio->bi_disk;
945 946 947
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
948 949 950
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
951
	}
952
	blk_queue_exit(disk->queue);
953 954 955
	return ret;
}

956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023
/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio of the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

1024 1025
static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
1026
	struct bio_list bio_list[2] = { };
1027 1028
	blk_qc_t ret = BLK_QC_T_NONE;

1029
	current->bio_list = bio_list;
1030 1031

	do {
1032
		struct gendisk *disk = bio->bi_disk;
1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
1044
	} while ((bio = bio_list_pop(&bio_list[0])));
1045 1046 1047 1048 1049

	current->bio_list = NULL;
	return ret;
}

1050
/**
1051
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
1052 1053
 * @bio:  The bio describing the location in memory and on the device.
 *
1054 1055 1056 1057
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
1058
 */
1059
blk_qc_t submit_bio_noacct(struct bio *bio)
1060
{
1061
	if (!submit_bio_checks(bio))
1062
		return BLK_QC_T_NONE;
1063 1064

	/*
1065 1066 1067 1068
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method while
	 * it is active, and then process them after it returned.
1069
	 */
1070
	if (current->bio_list) {
1071
		bio_list_add(&current->bio_list[0], bio);
1072
		return BLK_QC_T_NONE;
1073
	}
1074

1075 1076
	if (!bio->bi_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
1077
	return __submit_bio_noacct(bio);
1078
}
1079
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_disk and bi_partno fields.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
L
Linus Torvalds 已提交
1095
{
T
Tejun Heo 已提交
1096 1097 1098
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

1099 1100 1101 1102
	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
1103
	if (bio_has_data(bio)) {
1104 1105
		unsigned int count;

1106
		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
1107
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
1108 1109 1110
		else
			count = bio_sectors(bio);

1111
		if (op_is_write(bio_op(bio))) {
1112 1113
			count_vm_events(PGPGOUT, count);
		} else {
1114
			task_io_account_read(bio->bi_iter.bi_size);
1115 1116
			count_vm_events(PGPGIN, count);
		}
L
Linus Torvalds 已提交
1117 1118
	}

1119
	/*
1120 1121 1122 1123
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
1124
	 */
1125 1126 1127 1128
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;
1129

1130
		psi_memstall_enter(&pflags);
1131
		ret = submit_bio_noacct(bio);
1132 1133
		psi_memstall_leave(&pflags);

1134 1135 1136
		return ret;
	}

1137
	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
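
/*
 * Illustrative sketch, not part of this file: a typical caller fills in a
 * bio and fires it off, e.g.:
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_end_io = my_end_io;	(hypothetical completion callback)
 *	submit_bio(bio);
 */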

/**
1142
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
1143
 *                              for the new queue limits
1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
1155 1156
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
1157
 */
1158
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1159
				      struct request *rq)
1160
{
1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same, etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

1177
		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
1178
			__func__, blk_rq_sectors(rq), max_sectors);
1179
		return BLK_STS_IOERR;
1180 1181 1182 1183 1184 1185 1186 1187
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
1188
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
1189
	if (rq->nr_phys_segments > queue_max_segments(q)) {
1190 1191
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
1192
		return BLK_STS_IOERR;
1193 1194
	}

1195
	return BLK_STS_OK;
1196 1197 1198 1199 1200 1201 1202
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
1203
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1204
{
1205 1206 1207 1208 1209
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;
1210

1211 1212
	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1213
		return BLK_STS_IOERR;
1214

1215 1216 1217
	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

J
Jens Axboe 已提交
1218
	if (blk_queue_io_stat(q))
1219
		blk_account_io_start(rq);
1220 1221

	/*
J
Jens Axboe 已提交
1222 1223 1224
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
1225
	 */
1226 1227 1228 1229
	ret = blk_mq_request_issue_directly(rq, true);
	if (ret)
		blk_account_io_done(rq, ktime_get_ns());
	return ret;
1230 1231 1232
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251
/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

1252
	if (!(rq->rq_flags & RQF_MIXED_MERGE))
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
J
Jens Axboe 已提交
1263
		if ((bio->bi_opf & ff) != ff)
1264
			break;
1265
		bytes += bio->bi_iter.bi_size;
1266 1267 1268 1269 1270 1271 1272 1273
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

1274
void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
1275 1276 1277 1278
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
1279 1280 1281 1282 1283 1284
	if (unlikely(time_after(now, stamp)) &&
		likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
		if (precise_iostat) {
			if (end || part_in_flight(part))
				__part_stat_add(part, io_ticks, now - stamp);
		} else {
1285
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
1286
		}
1287 1288 1289 1290 1291 1292 1293
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

1294
static void blk_account_io_completion(struct request *req, unsigned int bytes)
1295
{
1296
	if (req->part && blk_do_io_stat(req)) {
1297
		const int sgrp = op_stat_group(req_op(req));
1298 1299
		struct hd_struct *part;

1300
		part_stat_lock();
1301
		part = req->part;
1302
		part_stat_add(part, sectors[sgrp], bytes >> 9);
1303 1304 1305 1306
		part_stat_unlock();
	}
}

1307
void blk_account_io_done(struct request *req, u64 now)
1308 1309
{
	/*
1310 1311 1312
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
1313
	 */
1314 1315
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
1316
		const int sgrp = op_stat_group(req_op(req));
1317
		struct hd_struct *part;
1318
#ifdef CONFIG_64BIT
1319
		u64 stat_time;
1320
		struct request_wrapper *rq_wrapper = request_to_wrapper(req);
1321
#endif
1322

1323
		part_stat_lock();
1324
		part = req->part;
1325
		update_io_ticks(part, jiffies, true);
1326
		part_stat_inc(part, ios[sgrp]);
1327
#ifdef CONFIG_64BIT
1328
		stat_time = READ_ONCE(rq_wrapper->stat_time_ns);
1329
		/*
1330
		 * This might fail if 'stat_time_ns' is updated
1331 1332
		 * in blk_mq_check_inflight_with_stat().
		 */
1333 1334
		if (likely(now > stat_time &&
			   cmpxchg64(&rq_wrapper->stat_time_ns, stat_time, now)
1335 1336 1337 1338 1339 1340
			   == stat_time)) {
			u64 duration = stat_time ? now - stat_time :
				now - req->start_time_ns;

			part_stat_add(req->part, nsecs[sgrp], duration);
		}
1341 1342 1343
#else
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
#endif
1344 1345
		if (precise_iostat)
			part_stat_local_dec(part, in_flight[rq_data_dir(req)]);
1346
		part_stat_unlock();
1347

1348
		hd_struct_put(part);
1349 1350 1351
	}
}

1352
void blk_account_io_start(struct request *rq)
1353 1354 1355 1356
{
	if (!blk_do_io_stat(rq))
		return;

1357
	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
1358

1359
	part_stat_lock();
1360
	update_io_ticks(rq->part, jiffies, false);
1361 1362
	if (precise_iostat)
		part_stat_local_inc(rq->part, in_flight[rq_data_dir(rq)]);
1363 1364 1365
	part_stat_unlock();
}

1366 1367
static unsigned long __part_start_io_acct(struct hd_struct *part,
					  unsigned int sectors, unsigned int op)
1368 1369 1370 1371 1372 1373 1374 1375 1376 1377
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
1378

1379 1380
	return now;
}
1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395

unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
				 struct bio *bio)
{
	*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);

	return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(part_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(&disk->part0, sectors, op);
}
1396 1397
EXPORT_SYMBOL(disk_start_io_acct);

1398 1399
static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
			       unsigned long start_time)
1400 1401 1402 1403
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;
1404

1405 1406 1407 1408
	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
1409 1410
	part_stat_unlock();
}
1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424

void part_end_io_acct(struct hd_struct *part, struct bio *bio,
		      unsigned long start_time)
{
	__part_end_io_acct(part, bio_op(bio), start_time);
	hd_struct_put(part);
}
EXPORT_SYMBOL_GPL(part_end_io_acct);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(&disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
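
/*
 * Illustrative sketch, not part of this file: bio-based drivers that do not
 * go through struct request bracket each bio with these helpers, e.g.:
 *
 *	unsigned long start = disk_start_io_acct(disk, bio_sectors(bio),
 *						 bio_op(bio));
 *	... perform the I/O ...
 *	disk_end_io_acct(disk, bio_op(bio), start);
 */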

1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447
/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

1448
/**
1449
 * blk_update_request - Special helper function for request stacking drivers
1450
 * @req:      the request being processed
1451
 * @error:    block status code
1452
 * @nr_bytes: number of bytes to complete @req
1453 1454
 *
 * Description:
1455 1456 1457
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
1458 1459 1460
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
1461
 *     Actual device drivers should use blk_mq_end_request instead.
1462 1463 1464
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
1465
 *
1466 1467 1468 1469
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *	blk_rq_bytes() and in blk_update_request().
 *
1470
 * Return:
1471 1472
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
1473
 **/
1474 1475
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
L
Linus Torvalds 已提交
1476
{
1477
	int total_bytes;
L
Linus Torvalds 已提交
1478

1479
	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
1480

1481 1482 1483
	if (!req->bio)
		return false;

1484 1485 1486 1487 1488 1489
#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

1490 1491
	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
1492
		print_req_error(req, error, __func__);
L
Linus Torvalds 已提交
1493

1494
	blk_account_io_completion(req, nr_bytes);
1495

1496 1497 1498
	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
1499
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
L
Linus Torvalds 已提交
1500

1501
		if (bio_bytes == bio->bi_iter.bi_size)
L
Linus Torvalds 已提交
1502 1503
			req->bio = bio->bi_next;

N
NeilBrown 已提交
1504 1505
		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1506
		req_bio_endio(req, bio, bio_bytes, error);
L
Linus Torvalds 已提交
1507

1508 1509
		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;
L
Linus Torvalds 已提交
1510

1511 1512
		if (!nr_bytes)
			break;
L
Linus Torvalds 已提交
1513 1514 1515 1516 1517
	}

	/*
	 * completely done
	 */
1518 1519 1520 1521 1522 1523
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
1524
		req->__data_len = 0;
1525 1526
		return false;
	}
L
Linus Torvalds 已提交
1527

1528
	req->__data_len -= total_bytes;
1529 1530

	/* update sector only for requests with clear definition of sector */
1531
	if (!blk_rq_is_passthrough(req))
1532
		req->__sector += total_bytes >> 9;
1533

1534
	/* mixed attributes always follow the first bio */
1535
	if (req->rq_flags & RQF_MIXED_MERGE) {
1536
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
J
Jens Axboe 已提交
1537
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
1538 1539
	}

1540 1541 1542 1543 1544 1545 1546 1547 1548
	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}
1549

1550
		/* recalculate the number of segments */
1551
		req->nr_phys_segments = blk_recalc_rq_segments(req);
1552
	}
1553

1554
	return true;
L
Linus Torvalds 已提交
1555
}
1556
EXPORT_SYMBOL_GPL(blk_update_request);
L
Linus Torvalds 已提交
1557

1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
1569
	struct bio_vec bvec;
1570 1571

	rq_for_each_segment(bvec, rq, iter)
1572
		flush_dcache_page(bvec.bv_page);
1573 1574 1575 1576
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
J
Jens Axboe 已提交
1598
	if (queue_is_mq(q) && q->mq_ops->busy)
J
Jens Axboe 已提交
1599
		return q->mq_ops->busy(q);
1600 1601 1602 1603 1604

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648
/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
1649
		bs = &fs_bio_set;
1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
1662
		} else {
1663
			rq->bio = rq->biotail = bio;
1664 1665
		}
		bio = NULL;
1666 1667
	}

1668 1669 1670 1671 1672 1673 1674 1675 1676
	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;
1677

1678 1679
	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;
1680 1681 1682 1683 1684 1685 1686 1687 1688

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
1689 1690 1691
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

1692
int kblockd_schedule_work(struct work_struct *work)
L
Linus Torvalds 已提交
1693 1694 1695 1696 1697
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

1698 1699 1700 1701 1702 1703 1704
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

S
Suresh Jayaraman 已提交
1705 1706 1707 1708 1709
/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
1710 1711 1712 1713 1714 1715 1716 1717 1718
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
S
Suresh Jayaraman 已提交
1719 1720 1721 1722 1723 1724 1725 1726 1727
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
1728 1729 1730 1731
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

S
Shaohua Li 已提交
1732 1733 1734 1735 1736 1737
	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

1738
	INIT_LIST_HEAD(&plug->mq_list);
1739
	INIT_LIST_HEAD(&plug->cb_list);
1740
	plug->rq_count = 0;
1741
	plug->multiple_queues = false;
1742
	plug->nowait = false;
1743

1744
	/*
S
Shaohua Li 已提交
1745 1746
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
1747
	 */
S
Shaohua Li 已提交
1748
	tsk->plug = plug;
1749 1750 1751
}
EXPORT_SYMBOL(blk_start_plug);

1752
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
1753 1754 1755
{
	LIST_HEAD(callbacks);

S
Shaohua Li 已提交
1756 1757
	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);
1758

S
Shaohua Li 已提交
1759 1760
		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
1761 1762
							  struct blk_plug_cb,
							  list);
S
Shaohua Li 已提交
1763
			list_del(&cb->list);
1764
			cb->callback(cb, from_schedule);
S
Shaohua Li 已提交
1765
		}
1766 1767 1768
	}
}

1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
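
/*
 * Illustrative sketch, not part of this file: stacking drivers use
 * blk_check_plugged() to attach their own callback to the current plug;
 * my_unplug and struct my_plug_cb below are hypothetical:
 *
 *	struct my_plug_cb *cb = (void *)blk_check_plugged(my_unplug, dev,
 *							  sizeof(*cb));
 *	if (cb)
 *		... queue work on cb; it is flushed when the plug is released ...
 */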

1794
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1795
{
1796
	flush_plug_callbacks(plug, from_schedule);
1797 1798 1799

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
1800 1801
}

1802 1803 1804 1805 1806 1807 1808 1809 1810 1811
/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
1812 1813
void blk_finish_plug(struct blk_plug *plug)
{
S
Shaohua Li 已提交
1814 1815
	if (plug != current->plug)
		return;
1816
	blk_flush_plug_list(plug, false);
1817

S
Shaohua Li 已提交
1818
	current->plug = NULL;
1819
}
EXPORT_SYMBOL(blk_finish_plug);
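
/*
 * Illustrative sketch, not part of this file: a submitter batching several
 * bios wraps them in a plug so they can be merged and dispatched together:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */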

1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

L
Linus Torvalds 已提交
1834 1835
int __init blk_dev_init(void)
{
1836 1837
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1838
			sizeof_field(struct request, cmd_flags));
1839
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
1840
			sizeof_field(struct bio, bi_opf));
1841

1842 1843
	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
1844
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
L
Linus Torvalds 已提交
1845 1846 1847
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

1848
	blk_requestq_cachep = kmem_cache_create("request_queue",
1849
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
L
Linus Torvalds 已提交
1850

1851 1852
	blk_debugfs_root = debugfs_create_dir("block", NULL);

1853
	return 0;
L
Linus Torvalds 已提交
1854
}