// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;
struct kmem_cache *blk_requestq_srcu_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
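
/*
 * Usage sketch (not part of this file): a block driver typically uses these
 * helpers while setting up its queue, e.g. to mark the device non-rotational
 * and to suppress entropy contribution.  The queue pointer "q" below is
 * assumed to come from the driver's own setup code:
 *
 *	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 *
 * Because these are plain atomic bitops they may be called without holding
 * any queue lock.
 */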

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing a bio or request. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
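
/*
 * Example (illustrative only): blk_op_str() is handy in driver debug
 * messages; bio_op()/req_op() yield the REQ_OP_* value to translate:
 *
 *	pr_debug("%s: op=%s sector=%llu\n", __func__,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */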

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
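
/*
 * Example (sketch): a driver completing a request typically maps an errno
 * from its lower layers to a blk_status_t, and upper layers map it back when
 * reporting to userspace:
 *
 *	blk_status_t sts = errno_to_blk_status(err);	// e.g. -ETIMEDOUT
 *	blk_mq_end_request(rq, sts);
 *	...
 *	int err2 = blk_status_to_errno(sts);		// -ETIMEDOUT again
 *
 * Unknown errno values collapse to BLK_STS_IOERR, so the round trip is not
 * guaranteed to be lossless.
 */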

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set the DEAD flag to
	 * prevent blk_mq_run_hw_queues() from accessing the hardware queues
	 * after draining has finished.
	 */
	blk_freeze_queue(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * Read-side pairing of the barrier in blk_freeze_queue_start():
		 * order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
		 * against reading .mq_freeze_depth or the queue dying flag,
		 * otherwise the following wait may never return if the two
		 * reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}
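
/*
 * Pairing sketch (illustrative, not a complete user): any code path that
 * enters the queue this way must leave it again with blk_queue_exit() once
 * it no longer touches the queue:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT) == 0) {
 *		... issue work against q ...
 *		blk_queue_exit(q);
 *	}
 *
 * A non-zero return (-EBUSY or -ENODEV) means the queue is frozen, suspended
 * or dying and must not be used.
 */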

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * Read-side pairing of the barrier in blk_freeze_queue_start():
		 * order reading the __PERCPU_REF_DEAD flag of .q_usage_counter
		 * against reading .mq_freeze_depth or the queue dying flag,
		 * otherwise the following wait may never return if the two
		 * reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
			GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	if (alloc_srcu) {
		blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
		if (init_srcu_struct(q->srcu) != 0)
			goto fail_q;
	}

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_srcu;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_srcu:
	if (alloc_srcu)
		cleanup_srcu_struct(q->srcu);
fail_q:
	kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);
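
/*
 * Refcount pairing sketch (illustrative): code that stashes a request_queue
 * pointer for later use should pin it first and drop the reference when done;
 * the last blk_put_queue() must not happen in atomic context:
 *
 *	if (blk_get_queue(q)) {
 *		... use q ...
 *		blk_put_queue(q);
 *	}
 *
 * blk_get_queue() fails once the queue is dying, in which case @q must not be
 * dereferenced further.
 */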

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
	if (blk_crypto_bio_prep(&bio)) {
		if (likely(bio_queue_enter(bio) == 0)) {
			disk->fops->submit_bio(bio);
			blk_queue_exit(disk->queue);
		}
	}
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (unlikely(!submit_bio_checks(bio)))
		return;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);
	else
		__submit_bio_fops(disk, bio);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio of the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method while
	 * it is active, and then process them after it has returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bdev_get_queue(bio->bi_bdev)) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
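
/*
 * Submission sketch (illustrative only; the bio allocation and setup calls
 * shown are assumptions of this example, not requirements of submit_bio()):
 * a caller builds a fully initialised bio, installs a completion callback,
 * and then hands it off.  The bio must not be touched again until
 * ->bi_end_io() runs:
 *
 *	bio = bio_alloc(GFP_KERNEL, 1);
 *	bio_set_dev(bio, bdev);
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;		// hypothetical callback
 *	bio->bi_private = my_ctx;		// hypothetical context
 *	submit_bio(bio);
 */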

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug(current->plug, false);

	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
		return 0;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);

/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can
	 *     simply do nothing in this case
	 *  2) the bio points to a not poll enabled device.  bio_poll will catch
	 *     this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
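
/*
 * Wiring sketch (illustrative): a driver or filesystem that stores the
 * in-flight bio in iocb->private at submission time can simply point its
 * ->iopoll method at this helper, e.g.:
 *
 *	static const struct file_operations my_fops = {	// hypothetical
 *		.read_iter	= my_read_iter,
 *		.write_iter	= my_write_iter,
 *		.iopoll		= iocb_bio_iopoll,
 *	};
 *
 * The submission path is then responsible for storing the bio in
 * kiocb->private and clearing it before the bio is freed.
 */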

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op,
					  unsigned long start_time)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();
	update_io_ticks(part, start_time, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}

/**
 * bio_start_io_acct_time - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 * @start_time:	start time that should be passed back to bio_end_io_acct().
 */
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
{
	__part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
			     bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct_time);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
				    bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op, jiffies);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
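
/*
 * Accounting sketch (illustrative): a bio based driver brackets each bio it
 * handles with the start/end helpers so that diskstats and in_flight counts
 * stay correct.  bio_end_io_acct() (a wrapper around
 * bio_end_io_acct_remapped() defined in blkdev.h) is typically used when the
 * bio was not remapped to another device:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *	... perform the I/O ...
 *	bio_end_io_acct(bio, start);
 */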

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
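
/*
 * Plugging sketch (illustrative): a submitter batching several bios keeps a
 * blk_plug on its own stack; I/O may be held back until blk_finish_plug(),
 * or flushed earlier if the limits described above are hit or the task
 * sleeps:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (...)
 *		submit_bio(...);	// bios built by the caller
 *	blk_finish_plug(&plug);
 */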

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we now hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));
	BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
			   __alignof__(struct request_queue)) !=
		     sizeof(struct request_queue));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
			sizeof(struct request_queue) +
			sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}