// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
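
/*
 * Usage sketch (illustrative, not part of this file): a caller that must
 * perform some setup exactly once can rely on the atomic test-and-set
 * helper, e.g.:
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STATS, q))
 *		do_one_time_stats_setup(q);	// hypothetical helper
 *
 * blk_queue_flag_set()/blk_queue_flag_clear() are plain atomic bit ops on
 * q->queue_flags and are safe from any context.
 */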

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return the string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
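
/*
 * Example (illustrative): blk_op_str() is meant for log and trace output,
 * e.g.:
 *
 *	pr_debug("op=%s sector=%llu\n",
 *		 blk_op_str(req_op(rq)), (unsigned long long)blk_rq_pos(rq));
 *
 * For REQ_OP_READ this prints "READ"; an out-of-range value prints "UNKNOWN".
 */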

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
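
/*
 * Example (illustrative): the errno mapping is lossy in one direction -
 * any errno without a dedicated BLK_STS_* code collapses to BLK_STS_IOERR:
 *
 *	errno_to_blk_status(-ENOSPC);		// BLK_STS_NOSPC
 *	errno_to_blk_status(-EINVAL);		// BLK_STS_IOERR (no direct match)
 *	blk_status_to_errno(BLK_STS_AGAIN);	// -EAGAIN
 */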

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
	blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	blk_sync_queue(q);
	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_rqs(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	percpu_ref_put(&q->q_usage_counter);
fail:
	rcu_read_unlock();
	return false;
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	struct request_queue *q = disk->queue;

	while (!blk_try_enter_queue(q, false)) {
		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EBUSY;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}
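
/*
 * Usage sketch (illustrative): every successful blk_queue_enter() must be
 * paired with a blk_queue_exit() once the caller is done with the queue:
 *
 *	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 *		return -EBUSY;			// frozen, dying or pm-suspended
 *	ret = do_work_on_queue(q);		// hypothetical helper
 *	blk_queue_exit(q);
 */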

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_split;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_stats:
	blk_free_queue_stats(q->stats);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
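
/*
 * Usage sketch (illustrative): passthrough-style callers pair
 * blk_get_request() with blk_put_request():
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... set up the payload and execute the request ...
 *	blk_put_request(rq);
 */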

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("%s: attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    current->comm,
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev->bd_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_hipri(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (unlikely(bio_queue_enter(bio) != 0))
		return BLK_QC_T_NONE;

	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
		goto queue_exit;
	if (disk->fops->submit_bio) {
		ret = disk->fops->submit_bio(bio);
		goto queue_exit;
	}
	return blk_mq_submit_bio(bio);

queue_exit:
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_bdev->bd_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret;

	current->bio_list = bio_list;

	do {
		ret = __submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method while
	 * it is active, and then process them after it has returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bio->bi_bdev->bd_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
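
/*
 * Usage sketch (illustrative, error handling elided): a typical caller
 * allocates a bio, points it at a device and a page, and submits it:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;	// hypothetical completion callback
 *	submit_bio(bio);
 */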

/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
1122 1123
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
1124
 */
1125
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
1126
				      struct request *rq)
1127
{
1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same,etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;

	if (rq->rq_disk &&
	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct block_device *part, unsigned long now,
		bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

void __blk_account_io_start(struct request *rq)
{
	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->rq_disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);
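
/*
 * Usage sketch (illustrative): a bio-based driver brackets each bio with
 * the accounting helpers; bio_end_io_acct() (assumed here to be the inline
 * wrapper around bio_end_io_acct_remapped() for non-remapped bios) closes
 * the window opened by bio_start_io_acct():
 *
 *	unsigned long start_time = bio_start_io_acct(bio);
 *	// ... perform the I/O ...
 *	bio_end_io_acct(bio, start_time);
 */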

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *      except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
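
/*
 * Usage sketch (illustrative): a driver completing a request in pieces
 * calls blk_update_request() until it returns false, then ends the
 * request, e.g. for blk-mq:
 *
 *	if (!blk_update_request(rq, BLK_STS_OK, bytes_done))
 *		__blk_mq_end_request(rq, BLK_STS_OK);	// no data left
 *	// otherwise rq has been set up again for the remaining segments
 */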

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
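
/*
 * Usage sketch (illustrative, loosely based on request-based dm): clone an
 * incoming request and dispatch it to the underlying queue:
 *
 *	if (blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, my_bio_ctr, data))
 *		return -ENOMEM;		// my_bio_ctr/data are hypothetical
 *	ret = blk_insert_cloned_request(clone->q, clone);
 */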

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->nowait = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
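
/*
 * Usage sketch (illustrative): stacked drivers such as md use
 * blk_check_plugged() to attach a private callback that runs when the
 * plug is flushed:
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug, data, sizeof(*cb));
 *	if (!cb)
 *		my_unplug_now(data);	// no plug active; both helpers hypothetical
 */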

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
	if (unlikely(!from_schedule && plug->cached_rq))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
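
/*
 * Usage sketch (illustrative): submitters bracket a batch of I/O with a
 * plug so the block layer can merge and batch-dispatch the requests:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (has_more_bios())		// hypothetical loop condition
 *		submit_bio(next_bio());	// held back in the plug
 *	blk_finish_plug(&plug);		// now flushed to the driver(s)
 */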

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}