// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

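/*
 * Example (illustrative sketch, not part of the original file): a driver that
 * wants to toggle queue flags can use the helpers above; "my_queue" is a
 * hypothetical, already-allocated request queue.
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, my_queue);
 *	if (blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, my_queue))
 *		pr_debug("NOMERGES was already set\n");
 *	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, my_queue);
 */
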
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful in the debugging and tracing bio or request. For
 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

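/*
 * Example (illustrative sketch): blk_op_str() is meant for debugging and
 * tracing output; "rq" below is a hypothetical request pointer.
 *
 *	pr_debug("completing %s request at sector %llu\n",
 *		 blk_op_str(req_op(rq)), (unsigned long long)blk_rq_pos(rq));
 */
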
static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

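/*
 * Example (illustrative sketch): drivers usually keep blk_status_t internally
 * and only convert at boundaries that expect errnos; the helpers above
 * round-trip cleanly for the table entries:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);  (BLK_STS_NOSPC)
 *	int err = blk_status_to_errno(sts);               (-ENOSPC again)
 */
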
const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

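/*
 * Example (illustrative sketch): blk_set_pm_only() and blk_clear_pm_only()
 * are used in pairs around a span in which only BLK_MQ_REQ_PM requests may
 * enter the queue, e.g. while a hypothetical driver suspends its device:
 *
 *	blk_set_pm_only(q);
 *	... issue power-management requests allocated with BLK_MQ_REQ_PM ...
 *	blk_clear_pm_only(q);
 */
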
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	/* cleanup rq qos structures for queue without disk */
	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

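/*
 * Example (illustrative sketch): a caller that needs to touch queue state
 * outside of bio submission pairs blk_queue_enter() with blk_queue_exit() so
 * the queue cannot be frozen or torn down underneath it:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;
 *	... safely use q here ...
 *	blk_queue_exit(q);
 */
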
static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

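/*
 * Example (illustrative sketch): code that stashes a queue pointer beyond its
 * caller's reference takes its own reference and drops it when done:
 *
 *	if (!blk_get_queue(q))
 *		return -ENODEV;		(queue is already dying)
 *	... keep and use q ...
 *	blk_put_queue(q);
 */
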
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
	if (blk_crypto_bio_prep(&bio)) {
		if (likely(bio_queue_enter(bio) == 0)) {
			disk->fops->submit_bio(bio);
			blk_queue_exit(disk->queue);
		}
	}
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);
	else
		__submit_bio_fops(disk, bio);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio of the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
889 890 891 892
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
893
 */
894
void submit_bio_noacct(struct bio *bio)
895
{
896
	/*
897 898 899 900
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submited by a ->submit_bio method while
	 * it is active, and then process them after it returned.
901
	 */
902
	if (current->bio_list)
903
		bio_list_add(&current->bio_list[0], bio);
904 905 906 907
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bdev_get_queue(bio->bi_bdev)) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

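/*
 * Example (illustrative sketch, assuming the bio allocation API of this
 * kernel version): reading one page from a block device could look like the
 * following; "my_end_io" is a hypothetical completion callback.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */
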
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug(current->plug, false);

	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
		return 0;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

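/*
 * Example (illustrative sketch): a synchronous caller that submitted a polled
 * bio can spin for its completion; "done" is a hypothetical flag set from the
 * bio's ->bi_end_io callback.
 *
 *	submit_bio(bio);
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */
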
/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can just
	 *     do nothing in this case
	 *  2) the bio points to a not poll enabled device.  bio_poll will catch
	 *     this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op,
					  unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(part, start_time, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 * @start_time:	start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	__part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			     bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				    bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op, jiffies);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

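/*
 * Example (illustrative sketch): a bio based driver brackets each bio with
 * the accounting helpers above so diskstats stay correct; bio_end_io_acct()
 * is the inline wrapper around bio_end_io_acct_remapped() for the
 * non-remapped case.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	... drive the I/O ...
 *	bio_end_io_acct(bio, start);
 */
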
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

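/*
 * Example (illustrative sketch): block-layer code that must defer work to
 * process context queues it on kblockd; "my_work" and "my_dwork" are
 * hypothetical items set up with INIT_WORK()/INIT_DELAYED_WORK().
 *
 *	kblockd_schedule_work(&my_work);
 *	kblockd_mod_delayed_work_on(smp_processor_id(), &my_dwork,
 *				    msecs_to_jiffies(5));
 */
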
void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

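/*
 * Example (illustrative sketch): stacking drivers such as md use
 * blk_check_plugged() to hang their own state off the current plug; the
 * callback runs when the plug is flushed.  "my_unplug" and "struct
 * my_plug_cb" (which embeds struct blk_plug_cb as its first member) are
 * hypothetical.
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug, my_data, sizeof(struct my_plug_cb));
 *	if (!cb)
 *		... no plug active, dispatch immediately ...
 */
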
void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we know we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

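/*
 * Example (illustrative sketch): submitters batch a burst of bios between
 * blk_start_plug() and blk_finish_plug(); the plug lives on the caller's
 * stack and is flushed automatically should the task sleep in between.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit_bio() a number of bios ...
 *	blk_finish_plug(&plug);
 */
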
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}