// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralized block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing bios or requests. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more requests, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * This is the read half of the barrier in
		 * blk_freeze_queue_start(): we need to order reading the
		 * __PERCPU_REF_DEAD flag of .q_usage_counter against reading
		 * .mq_freeze_depth or the queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);

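/*
 * Illustrative sketch (not part of this file): a driver allocating a
 * passthrough request on an assumed queue "q" and releasing it again.
 * The error handling and the way the request is issued are the caller's
 * business; only the allocate/free pairing is shown.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... fill in driver-specific fields and issue the request ...
 *	blk_put_request(rq);
 */
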
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);
	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(q, bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct.  If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio of the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method while
	 * it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_disk and bi_partno fields.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
			current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;
		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

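/*
 * Illustrative sketch (not part of this file): how a filesystem or driver
 * typically builds and submits a bio.  "bdev", "sector", "page" and
 * "my_end_io" are assumptions standing in for the caller's own objects and
 * completion handler.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */
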
/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same,etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;
	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;
		part_stat_add(part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		update_io_ticks(part, jiffies, true);
		part_stat_inc(part, ios[sgrp]);
		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();

		hd_struct_put(part);
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static unsigned long __part_start_io_acct(struct hd_struct *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
1339

1340 1341
	return now;
}
1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356

unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
				 struct bio *bio)
{
	*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);

	return __part_start_io_acct(*part, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(part_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(&disk->part0, sectors, op);
}
1357 1358
EXPORT_SYMBOL(disk_start_io_acct);

1359 1360
static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
			       unsigned long start_time)
1361 1362 1363 1364
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;
1365

1366 1367 1368 1369
	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
1370 1371
	part_stat_unlock();
}
1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385

void part_end_io_acct(struct hd_struct *part, struct bio *bio,
		      unsigned long start_time)
{
	__part_end_io_acct(part, bio_op(bio), start_time);
	hd_struct_put(part);
}
EXPORT_SYMBOL_GPL(part_end_io_acct);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(&disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

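/*
 * Illustrative sketch (not part of this file): a bio-based driver using the
 * accounting helpers above around its own I/O handling.  "disk" and "bio"
 * are assumed to be the driver's gendisk and the bio it is servicing.
 *
 *	unsigned long start;
 *
 *	start = disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio));
 *	... perform the I/O described by the bio ...
 *	disk_end_io_acct(disk, bio_op(bio), start);
 */
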
/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

/**
 * blk_update_request - Special helper function for request stacking drivers
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     This special helper function is only for request stacking drivers
 *     (e.g. request-based dm) so that they can handle partial completion.
 *     Actual device drivers should use blk_mq_end_request instead.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in both
 *	blk_rq_bytes() and in blk_update_request().
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;
		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}
	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}
		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must set own
 *    exporting function using blk_queue_lld_busy() first.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.  However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
 *   pending I/O should the task end up blocking between blk_start_plug() and
 *   blk_finish_plug(). This is important from a performance perspective, but
 *   also ensures that we don't deadlock. For instance, if the task is blocking
 *   for a memory allocation, memory reclaim could end up wanting to free a
 *   page belonging to that request that is currently residing in our private
 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
 *   this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->nowait = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);
		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
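
/*
 * Illustrative sketch (not part of this file): a stacking driver deferring
 * work while a plug is active.  "my_unplug", "my_data" and
 * "struct my_plug_cb" are assumptions standing in for the driver's own
 * callback and bookkeeping structure embedding a struct blk_plug_cb.
 *
 *	struct blk_plug_cb *cb;
 *
 *	cb = blk_check_plugged(my_unplug, my_data, sizeof(struct my_plug_cb));
 *	if (cb)
 *		... queue the work on cb; my_unplug() runs at unplug time ...
 *	else
 *		... no plug in progress, dispatch immediately ...
 */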

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

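/*
 * Illustrative sketch (not part of this file): batching a series of
 * submissions under a plug, as described in the blk_start_plug()
 * documentation above, so the block layer can merge and dispatch them
 * together.  "bios" and "nr_bios" are assumed to belong to the caller.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */
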
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}