// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
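
/*
 * Example (illustrative sketch only, not used by this file): a driver that
 * wants to mark its queue as non-rotational and also check the previous
 * state could combine these helpers as follows; "q" is assumed to be a
 * live request queue owned by the caller.
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NONROT, q))
 *		pr_debug("QUEUE_FLAG_NONROT was not set before\n");
 *	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 */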

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX for a given REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer helper to convert a REQ_OP_XXX value
 * into string form. Useful for debugging and tracing bios or requests. For
 * an invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
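
/*
 * Example (illustrative sketch only): the two helpers are inverses for the
 * well-known codes, so a driver translating a POSIX error from a lower
 * layer into a blk_status_t and back might do:
 *
 *	blk_status_t sts = errno_to_blk_status(-ENOSPC);
 *	int err = blk_status_to_errno(sts);	// err == -ENOSPC
 *
 * Unrecognized errno values map to BLK_STS_IOERR rather than failing.
 */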

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that the counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read-side pair of the barrier in
		 * blk_freeze_queue_start(); we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *__blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id)
{
	struct request_queue *q;

	if (WARN_ON_ONCE(!make_request))
		return NULL;

	q = __blk_alloc_queue(node_id);
	if (!q)
		return NULL;
	q->make_request_fn = make_request;
	q->nr_requests = BLKDEV_MAX_RQ;
	return q;
}
EXPORT_SYMBOL(blk_alloc_queue);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
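
/*
 * Example (illustrative sketch only, assuming the caller already holds a
 * reference to a live queue "q"): allocating a driver-private request,
 * propagating the error case, and releasing it again.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... fill in and issue the request ...
 *	blk_put_request(rq);
 */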

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return true;
}

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return true;
}

bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q && same_queue_rq) {
			/*
			 * Only blk-mq multiple hardware queues case checks the
			 * rq in the same queue, there should be only one such
			 * rq in a queue
			 **/
			*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "generic_make_request: Trying to write "
			"to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue is not a request based queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (!blkcg_bio_issue_check(q, bio))
		return false;

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t do_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!q->make_request_fn)
			return blk_mq_make_request(q, bio);
		ret = q->make_request_fn(q, bio);
	}
	blk_queue_exit(q);
	return ret;
}

/**
 * generic_make_request - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))
		goto out;

	/*
	 * We only want one ->make_request_fn to be active at a time, else
	 * stack usage with stacked devices could be a problem.  So use
	 * current->bio_list to keep a list of requests submitted by a
	 * make_request_fn function.  current->bio_list is also used as a
	 * flag to say if generic_make_request is currently active in this
	 * task or not.  If it is NULL, then no make_request is active.  If
	 * it is non-NULL, then a make_request is active, and new requests
	 * should be added at the tail
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		goto out;
	}

	/* following loop may be a bit non-obvious, and so deserves some
	 * explanation.
	 * Before entering the loop, bio->bi_next is NULL (as all callers
	 * ensure that) so we have a list with a single bio.
	 * We pretend that we have just taken it off a longer list, so
	 * we assign bio_list to a pointer to the bio_list_on_stack,
	 * thus initialising the bio_list of new bios to be
	 * added.  ->make_request() may indeed add some more bios
	 * through a recursive call to generic_make_request.  If it
	 * did, we find a non-NULL value in bio_list and re-enter the loop
	 * from the top.  In this case we really did just take the bio
	 * of the top of the list (no pretending) and so remove it from
	 * bio_list, and call into ->make_request() again.
	 */
	BUG_ON(bio->bi_next);
	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;
	do {
		struct request_queue *q = bio->bi_disk->queue;

		if (likely(bio_queue_enter(bio) == 0)) {
			struct bio_list lower, same;

			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);
			ret = do_make_request(bio);

			/* sort new bios into those for a lower level
			 * and those for the same level
			 */
			bio_list_init(&lower);
			bio_list_init(&same);
			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
				if (q == bio->bi_disk->queue)
					bio_list_add(&same, bio);
				else
					bio_list_add(&lower, bio);
			/* now assemble so we handle the lowest level first */
			bio_list_merge(&bio_list_on_stack[0], &lower);
			bio_list_merge(&bio_list_on_stack[0], &same);
			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
		}
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
	current->bio_list = NULL; /* deactivate */

out:
	return ret;
}
EXPORT_SYMBOL(generic_make_request);

/**
 * direct_make_request - hand a buffer directly to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This function behaves like generic_make_request(), but does not protect
 * against recursion.  Must only be used if the called driver is known
 * to be blk-mq based.
 */
blk_qc_t direct_make_request(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;

	if (WARN_ON_ONCE(q->make_request_fn)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (!generic_make_request_checks(bio))
		return BLK_QC_T_NONE;
	if (unlikely(bio_queue_enter(bio)))
		return BLK_QC_T_NONE;
	if (!blk_crypto_bio_prep(&bio)) {
		blk_queue_exit(q);
		return BLK_QC_T_NONE;
	}
	return blk_mq_make_request(q, bio);
}
EXPORT_SYMBOL_GPL(direct_make_request);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_disk and bi_partno fields.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = generic_make_request(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
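
/*
 * Example (illustrative sketch only): a filesystem-style caller typically
 * builds the bio itself before handing it to submit_bio().  "bdev", "page",
 * "sector" and "my_end_io" are assumptions of this sketch, not names used
 * elsewhere in this file.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */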

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static int blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq),
			blk_queue_get_max_sectors(q, req_op(rq)));
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return -EIO;
	}

	return 0;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;
		part_stat_add(part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		update_io_ticks(part, jiffies, true);
		part_stat_inc(part, ios[sgrp]);
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();

		hd_struct_put(part);
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
		unsigned int op)
{
	struct hd_struct *part = &disk->part0;
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}
EXPORT_SYMBOL(disk_start_io_acct);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		unsigned long start_time)
{
	struct hd_struct *part = &disk->part0;
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(disk_end_io_acct);
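
/*
 * Example (illustrative sketch only): a bio-based driver that completes I/O
 * outside the request framework can bracket each bio with the two helpers.
 * "disk" and "bio" are assumptions of the sketch.
 *
 *	unsigned long start;
 *
 *	start = disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio));
 *	// ... perform the transfer ...
 *	disk_end_io_acct(disk, bio_op(bio), start);
 */
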
/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_mq_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *	blk_rq_bytes() and in blk_update_request().
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio)
		blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask);

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
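
/*
 * Example (illustrative sketch only): batching several submissions under
 * one plug so the block layer can merge and dispatch them together; "bio1"
 * and "bio2" are assumed to be bios prepared by the caller.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(bio1);
 *	submit_bio(bio2);
 *	blk_finish_plug(&plug);
 */
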
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

#ifdef CONFIG_DEBUG_FS
	blk_debugfs_root = debugfs_create_dir("block", NULL);
#endif

	return 0;
}