// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing a bio or request. For
 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
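
/*
 * Editor's illustrative sketch, not part of the original file: blk_op_str()
 * is handy when logging a bio during debugging.  The pr_debug() call and the
 * "bio" variable below are hypothetical caller context.
 *
 *	pr_debug("%s: op=%s sector=%llu\n", __func__,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */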

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
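
/*
 * Editor's illustrative sketch, not part of the original file: drivers
 * usually keep errors as blk_status_t internally and only convert at the
 * boundary to a user-visible code, e.g.:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);	// BLK_STS_NOSPC
 *	int err = blk_status_to_errno(sts);			// back to -ENOSPC
 *
 * Values with no entry in blk_errors[] fall back to BLK_STS_IOERR and -EIO
 * respectively.
 */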

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q)) {
		blk_mq_cancel_work_sync(q);
		blk_mq_exit_queue(q);
	}

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
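
/*
 * Editor's illustrative sketch, not part of the original file: every
 * successful blk_queue_enter() must be paired with blk_queue_exit() so the
 * q_usage_counter can drain during a freeze.  A hypothetical caller that
 * must not block could do:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;
 *	... touch the queue ...
 *	blk_queue_exit(q);
 */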

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);
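
/*
 * Editor's illustrative sketch, not part of the original file: a caller that
 * stashes a request_queue pointer takes a reference first and drops it with
 * blk_put_queue() when done; blk_get_queue() fails once the queue is dying.
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;
 *	... use q ...
 *	blk_put_queue(q);
 */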

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return false;

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
	if (unlikely(bio_queue_enter(bio) != 0))
		return;
	if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
		disk->fops->submit_bio(bio);
	blk_queue_exit(disk->queue);
}

static void __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (!disk->fops->submit_bio)
		blk_mq_submit_bio(bio);
	else
		__submit_bio_fops(disk, bio);
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method while
	 * it is active, and then process them after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bdev_get_queue(bio->bi_bdev)) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;

		psi_memstall_enter(&pflags);
		submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);
		return;
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
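
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * submitter allocates a bio, points it at a device and buffer, sets a
 * completion callback and hands it off with submit_bio().  The names
 * example_end_io, bdev, page and sector are hypothetical caller state.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = example_end_io;
 *	submit_bio(bio);
 */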

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	int ret;

	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug(current->plug, false);

	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
		return 0;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
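
/*
 * Editor's illustrative sketch, not part of the original file: a submitter
 * that wants completion-by-polling marks the bio as polled and then spins on
 * bio_poll() until its completion handler has run.  "done" is a hypothetical
 * flag set by the caller's ->bi_end_io().
 *
 *	bio->bi_opf |= REQ_POLLED;
 *	submit_bio(bio);
 *	while (!done)
 *		bio_poll(bio, NULL, 0);
 */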

/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point.  If that happens
	 * we have a few cases to consider:
	 *
	 *  1) the bio is being initialized and bi_bdev is NULL.  We can just
	 *     do nothing in this case
	 *  2) the bio points to a not poll enabled device.  bio_poll will catch
	 *     this and return 0
	 *  3) the bio points to a poll capable device, including but not
	 *     limited to the one that the original bio pointed to.  In this
	 *     case we will call into the actual poll method and poll for I/O,
	 *     even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid.  Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
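
/*
 * Editor's illustrative sketch, not part of the original file: a bio based
 * driver brackets each bio it handles with the accounting helpers above so
 * that in-flight counts and io_ticks stay correct.
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	... perform the I/O ...
 *
 *	bio_end_io_acct_remapped(bio, start, bio->bi_bdev);
 *
 * Drivers that remap bio->bi_bdev in between must pass the original bdev,
 * which is why the _remapped variant takes it explicitly.
 */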

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
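
/*
 * Editor's illustrative sketch, not part of the original file: submitters
 * batch several bios under one on-stack plug so the block layer can merge
 * and dispatch them together.  bios[] and nr are hypothetical caller state.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */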

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
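
/*
 * Editor's illustrative sketch, not part of the original file: stacking
 * drivers use blk_check_plugged() to attach a per-plug callback that runs at
 * unplug time.  The driver embeds struct blk_plug_cb at the start of its own
 * structure; example_unplug and struct example_plug_cb are hypothetical.
 *
 *	struct example_plug_cb {
 *		struct blk_plug_cb cb;
 *		// driver private state ...
 *	};
 *
 *	struct blk_plug_cb *cb = blk_check_plugged(example_unplug, dev,
 *					sizeof(struct example_plug_cb));
 *	if (!cb)
 *		// no plug active, submit immediately
 */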

void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	if (!rq_list_empty(plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we now hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}