// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);
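	/*
	 * e.g. a 16-sector read (ddir 0) maps to bucket 2 * ilog2(16) = 8,
	 * while writes (ddir 1) land in the odd-numbered buckets.
	 */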

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)
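/*
 * A polling cookie packs the hardware queue index into the bits above
 * BLK_QC_T_SHIFT and the tag into the low 16 bits; BLK_QC_T_INTERNAL marks
 * the tag as a scheduler (internal) tag.  E.g. a request on hctx 2 with
 * driver tag 5 encodes to (2 << 16) | 5 = 0x20005.
 */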

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to make sure that quiescing has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, no dispatch can
 * happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->rq_disk = NULL;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	refcount_set(&rq->ref, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		rq->elv.icq = NULL;
		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

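/*
 * Batched allocation: grab a mask of free tags with one blk_mq_get_tags()
 * call, initialise a request for each set bit onto data->cached_rq, and
 * hand back the first request popped off that list.
 */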
static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->rq_flags & RQF_ELVPRIV) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.finish_request)
			e->type->ops.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *      except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		blk_print_req_error(req, error);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->rq_disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32
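/*
 * Completion batching: blk_mq_end_request_batch() collects up to
 * TAG_COMP_BATCH driver tags per hardware queue and frees them in one go,
 * amortizing the tag and q_usage_counter updates across the batch.
 */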

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_update_request(rq, BLK_STS_OK, blk_rq_bytes(rq));
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!refcount_dec_and_test(&rq->ref))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

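/*
 * Queue the request on the target CPU's blk_cpu_done llist; only the
 * transition from an empty list triggers the IPI (or the softirq below),
 * so back-to-back completions are coalesced into a single notification.
 */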
static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally; it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

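/*
 * hctx_lock()/hctx_unlock() protect a dispatch section: plain RCU for
 * regular hardware queues, or the per-hctx SRCU read lock when the driver
 * sets BLK_MQ_F_BLOCKING and may sleep in ->queue_rq().
 */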
static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		u64 start_time;
#ifdef CONFIG_BLK_CGROUP
		if (rq->bio)
			start_time = bio_issue_time(&rq->bio->bi_issue);
		else
#endif
			start_time = ktime_get_ns();
		rq->io_start_time_ns = start_time;
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
	        WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
			   int at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
		int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
				hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP is set, the rq already carries driver
		 * specific data, so insert it into the hctx dispatch list
		 * to avoid any merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			       void *priv, bool reserved)
{
	/*
	 * If we find a request that isn't idle and the queue matches,
	 * we know the queue is busy. Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark ourselves as
 * needing a restart. In both cases, take care to check the condition again
 * after marking ourselves as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is a simple way to compute a running average value
 * - a weight of 7/8 (old) and 1/8 (new) makes the value decay exponentially
 * - the factor of 4 keeps the result from collapsing to 0 too quickly; its
 *   exact value doesn't matter much because EWMA decays exponentially
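 * - e.g. starting from a value of 8, a busy update gives
 *   (8 * 7 + (1 << 4)) / 8 = 9, while an idle update decays it to 8 * 7 / 8 = 7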
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	int budget_token = -1;

	if (need_budget) {
		budget_token = blk_mq_get_dispatch_budget(rq->q);
		if (budget_token < 0) {
			blk_mq_put_driver_tag(rq);
			return PREP_DISPATCH_NO_BUDGET;
		}
		blk_mq_set_rq_budget_token(rq, budget_token);
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * All budgets not got from this function will be put
			 * together during handling partial dispatch
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q, budget_token);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		struct list_head *list)
{
	struct request *rq;

	list_for_each_entry(rq, list, queuelist) {
		int budget_token = blk_mq_get_rq_budget_token(rq);

		if (budget_token >= 0)
			blk_mq_put_dispatch_budget(q, budget_token);
	}
}

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);
	bool needs_resource = false;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * once the request is queued to lld, no need to cover the
		 * budget any more
		 */
		if (nr_budgets)
			nr_budgets--;
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
			needs_resource = true;
			fallthrough;
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
1865 1866 1867 1868 1869 1870
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
1871
			needs_resource = true;
1872 1873
			break;
		default:
1874
			errors++;
1875
			blk_mq_end_request(rq, ret);
1876
		}
1877
	} while (!list_empty(list));
1878
out:
1879 1880 1881
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

1882 1883 1884 1885 1886
	/* If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
1887 1888 1889 1890
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
1891
	if (!list_empty(list)) {
1892
		bool needs_restart;
1893 1894
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
1895
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1896

1897 1898
		if (nr_budgets)
			blk_mq_release_budgets(q, list);
1899

1900
		spin_lock(&hctx->lock);
1901
		list_splice_tail_init(list, &hctx->dispatch);
1902
		spin_unlock(&hctx->lock);
1903

1904 1905 1906 1907 1908 1909 1910 1911 1912
		/*
		 * Order adding requests to hctx->dispatch and checking
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(). Avoid restart code path to
		 * miss the new added requests to hctx->dispatch, meantime
		 * SCHED_RESTART is observed here.
		 */
		smp_mb();

1913
		/*
1914 1915 1916
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
1917
		 *
1918 1919 1920 1921
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
1922
		 *
1923 1924 1925 1926 1927 1928 1929
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
1930
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1931
		 *   and dm-rq.
1932 1933 1934
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
1935
		 * that could otherwise occur if the queue is idle.  We'll do
1936 1937
		 * similar if we couldn't get budget or couldn't lock a zone
		 * and SCHED_RESTART is set.
1938
		 */
1939
		needs_restart = blk_mq_sched_needs_restart(hctx);
1940 1941
		if (prep == PREP_DISPATCH_NO_BUDGET)
			needs_resource = true;
1942
		if (!needs_restart ||
1943
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1944
			blk_mq_run_hw_queue(hctx, true);
1945
		else if (needs_restart && needs_resource)
1946
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1947

1948
		blk_mq_update_dispatch_busy(hctx, true);
1949
		return false;
1950 1951
	} else
		blk_mq_update_dispatch_busy(hctx, false);
1952

1953
	return (queued + errors) != 0;
1954 1955
}

1956 1957 1958 1959 1960 1961
/**
 * __blk_mq_run_hw_queue - Run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 *
 * Send pending requests to the hardware.
 */
1962 1963 1964 1965
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

1966 1967 1968 1969 1970 1971
	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

1972
	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1973

1974 1975 1976
	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
1977 1978
}

1979 1980 1981 1982 1983 1984 1985 1986 1987
static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

1988 1989 1990 1991 1992 1993 1994 1995
/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
1996
	bool tried = false;
1997
	int next_cpu = hctx->next_cpu;
1998

1999 2000
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;
2001 2002

	if (--hctx->next_cpu_batch <= 0) {
2003
select_cpu:
2004
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2005
				cpu_online_mask);
2006
		if (next_cpu >= nr_cpu_ids)
2007
			next_cpu = blk_mq_first_mapped_cpu(hctx);
2008 2009 2010
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

2011 2012 2013 2014
	/*
	 * Do unbound schedule if we can't find a online CPU for this hctx,
	 * and it should only happen in the path of handling CPU DEAD.
	 */
2015
	if (!cpu_online(next_cpu)) {
2016 2017 2018 2019 2020 2021 2022 2023 2024
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
2025
		hctx->next_cpu = next_cpu;
2026 2027 2028
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}
2029 2030 2031

	hctx->next_cpu = next_cpu;
	return next_cpu;
2032 2033
}

2034 2035 2036 2037
/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
2038
 * @msecs: Milliseconds of delay to wait before running the queue.
2039 2040 2041 2042
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
 */
2043 2044
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
2045
{
2046
	if (unlikely(blk_mq_hctx_stopped(hctx)))
2047 2048
		return;

2049
	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2050 2051
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
2052
			__blk_mq_run_hw_queue(hctx);
2053
			put_cpu();
2054 2055
			return;
		}
2056

2057
		put_cpu();
2058
	}
2059

2060 2061
	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
2062 2063
}

2064 2065 2066
/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
2067
 * @msecs: Milliseconds of delay to wait before running the queue.
2068 2069 2070
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
2071 2072 2073 2074 2075 2076
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

2077 2078 2079 2080 2081 2082 2083 2084 2085
/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
2086
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2087
{
2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098
	int srcu_idx;
	bool need_run;

	/*
	 * When queue is quiesced, we may be switching io scheduler, or
	 * updating nr_hw_queues, or other things, and we can't run queue
	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
2099 2100 2101 2102
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);
2103

2104
	if (need_run)
2105
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
2106
}
O
Omar Sandoval 已提交
2107
EXPORT_SYMBOL(blk_mq_run_hw_queue);
2108

2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144
/*
 * Is the request queue handled by an IO scheduler that does not respect
 * hardware queues when dispatching?
 */
static bool blk_mq_has_sqsched(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.dispatch_request &&
	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
		return true;
	return false;
}

/*
 * Return prefered queue to dispatch from (if any) for non-mq aware IO
 * scheduler.
 */
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;

	/*
	 * If the IO scheduler does not respect hardware queues when
	 * dispatching, we just don't bother with multiple HW queues and
	 * dispatch from hctx for the current CPU since running multiple queues
	 * just causes lock contention inside the scheduler and pointless cache
	 * bouncing.
	 */
	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
				     raw_smp_processor_id());
	if (!blk_mq_hctx_stopped(hctx))
		return hctx;
	return NULL;
}

2145
/**
2146
 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2147 2148 2149
 * @q: Pointer to the request queue to run.
 * @async: If we want to run the queue asynchronously.
 */
2150
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2151
{
2152
	struct blk_mq_hw_ctx *hctx, *sq_hctx;
2153 2154
	int i;

2155 2156 2157
	sq_hctx = NULL;
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
2158
	queue_for_each_hw_ctx(q, hctx, i) {
2159
		if (blk_mq_hctx_stopped(hctx))
2160
			continue;
2161 2162 2163 2164 2165 2166 2167 2168
		/*
		 * Dispatch from this hctx either if there's no hctx preferred
		 * by IO scheduler or if it has requests that bypass the
		 * scheduler.
		 */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_run_hw_queue(hctx, async);
2169 2170
	}
}
2171
EXPORT_SYMBOL(blk_mq_run_hw_queues);
2172

2173 2174 2175
/**
 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
 * @q: Pointer to the request queue to run.
2176
 * @msecs: Milliseconds of delay to wait before running the queues.
2177 2178 2179
 */
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
2180
	struct blk_mq_hw_ctx *hctx, *sq_hctx;
2181 2182
	int i;

2183 2184 2185
	sq_hctx = NULL;
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
2186 2187 2188
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;
2189 2190 2191 2192 2193 2194 2195 2196
		/*
		 * Dispatch from this hctx either if there's no hctx preferred
		 * by IO scheduler or if it has requests that bypass the
		 * scheduler.
		 */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_delay_run_hw_queue(hctx, msecs);
2197 2198 2199 2200
	}
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220
/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

2221 2222 2223
/*
 * This function is often used for pausing .queue_rq() by driver when
 * there isn't enough resource or some conditions aren't satisfied, and
2224
 * BLK_STS_RESOURCE is usually returned.
2225 2226 2227 2228 2229
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
2230 2231
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
2232
	cancel_delayed_work(&hctx->run_work);
2233

2234
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2235
}
2236
EXPORT_SYMBOL(blk_mq_stop_hw_queue);
2237

2238 2239 2240
/*
 * This function is often used for pausing .queue_rq() by driver when
 * there isn't enough resource or some conditions aren't satisfied, and
2241
 * BLK_STS_RESOURCE is usually returned.
2242 2243 2244 2245 2246
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
2247 2248
void blk_mq_stop_hw_queues(struct request_queue *q)
{
2249 2250 2251 2252 2253
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
2254 2255 2256
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

2257 2258 2259
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2260

2261
	blk_mq_run_hw_queue(hctx, false);
2262 2263 2264
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

2265 2266 2267 2268 2269 2270 2271 2272 2273 2274
void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

2275 2276 2277 2278 2279 2280 2281 2282 2283 2284
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

2285
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2286 2287 2288 2289
{
	struct blk_mq_hw_ctx *hctx;
	int i;

2290 2291
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
2292 2293 2294
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

2295
static void blk_mq_run_work_fn(struct work_struct *work)
2296 2297 2298
{
	struct blk_mq_hw_ctx *hctx;

2299
	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
2300

2301
	/*
M
Ming Lei 已提交
2302
	 * If we are stopped, don't run the queue.
2303
	 */
2304
	if (blk_mq_hctx_stopped(hctx))
2305
		return;
2306 2307 2308 2309

	__blk_mq_run_hw_queue(hctx);
}

2310 2311 2312
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
2313
{
J
Jens Axboe 已提交
2314
	struct blk_mq_ctx *ctx = rq->mq_ctx;
M
Ming Lei 已提交
2315
	enum hctx_type type = hctx->type;
J
Jens Axboe 已提交
2316

2317 2318
	lockdep_assert_held(&ctx->lock);

2319
	trace_block_rq_insert(rq);
2320

2321
	if (at_head)
M
Ming Lei 已提交
2322
		list_add(&rq->queuelist, &ctx->rq_lists[type]);
2323
	else
M
Ming Lei 已提交
2324
		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
2325
}
2326

2327 2328
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
2329 2330 2331
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

2332 2333
	lockdep_assert_held(&ctx->lock);

J
Jens Axboe 已提交
2334
	__blk_mq_insert_req_list(hctx, rq, at_head);
2335 2336 2337
	blk_mq_hctx_mark_pending(hctx, ctx);
}

2338 2339 2340
/**
 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
 * @rq: Pointer to request to be inserted.
2341
 * @at_head: true if the request should be inserted at the head of the list.
2342 2343
 * @run_queue: If we should run the hardware queue after inserting the request.
 *
2344 2345 2346
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
2347 2348
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue)
2349
{
2350
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2351 2352

	spin_lock(&hctx->lock);
2353 2354 2355 2356
	if (at_head)
		list_add(&rq->queuelist, &hctx->dispatch);
	else
		list_add_tail(&rq->queuelist, &hctx->dispatch);
2357 2358
	spin_unlock(&hctx->lock);

2359 2360
	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
2361 2362
}

2363 2364
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
2365 2366

{
2367
	struct request *rq;
M
Ming Lei 已提交
2368
	enum hctx_type type = hctx->type;
2369

2370 2371 2372 2373
	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
2374
	list_for_each_entry(rq, list, queuelist) {
J
Jens Axboe 已提交
2375
		BUG_ON(rq->mq_ctx != ctx);
2376
		trace_block_rq_insert(rq);
2377
	}
2378 2379

	spin_lock(&ctx->lock);
M
Ming Lei 已提交
2380
	list_splice_tail_init(list, &ctx->rq_lists[type]);
2381
	blk_mq_hctx_mark_pending(hctx, ctx);
2382 2383 2384
	spin_unlock(&ctx->lock);
}

2385 2386
static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
			      bool from_schedule)
2387
{
2388 2389 2390 2391 2392 2393
	if (hctx->queue->mq_ops->commit_rqs) {
		trace_block_unplug(hctx->queue, *queued, !from_schedule);
		hctx->queue->mq_ops->commit_rqs(hctx);
	}
	*queued = 0;
}
2394

2395 2396
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
2397
{
2398 2399
	int err;

2400 2401 2402 2403 2404
	if (bio->bi_opf & REQ_RAHEAD)
		rq->cmd_flags |= REQ_FAILFAST_MASK;

	rq->__sector = bio->bi_iter.bi_sector;
	rq->write_hint = bio->bi_write_hint;
2405
	blk_rq_bio_prep(rq, bio, nr_segs);
2406 2407 2408 2409

	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
	WARN_ON_ONCE(err);
2410

2411
	blk_account_io_start(rq);
2412 2413
}

2414
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2415
					    struct request *rq, bool last)
2416 2417 2418 2419
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
2420
		.last = last,
2421
	};
2422
	blk_status_t ret;
2423 2424 2425 2426 2427 2428 2429 2430 2431

	/*
	 * For OK queue, we are done. For error, caller may kill it.
	 * Any other error (busy), just add it to our list as we
	 * previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
2432
		blk_mq_update_dispatch_busy(hctx, false);
2433 2434
		break;
	case BLK_STS_RESOURCE:
2435
	case BLK_STS_DEV_RESOURCE:
2436
		blk_mq_update_dispatch_busy(hctx, true);
2437 2438 2439
		__blk_mq_requeue_request(rq);
		break;
	default:
2440
		blk_mq_update_dispatch_busy(hctx, false);
2441 2442 2443 2444 2445 2446
		break;
	}

	return ret;
}

2447
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2448
						struct request *rq,
2449
						bool bypass_insert, bool last)
2450 2451
{
	struct request_queue *q = rq->q;
M
Ming Lei 已提交
2452
	bool run_queue = true;
2453
	int budget_token;
M
Ming Lei 已提交
2454

2455
	/*
2456
	 * RCU or SRCU read lock is needed before checking quiesced flag.
2457
	 *
2458 2459 2460
	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
	 * and avoid driver to try to dispatch again.
2461
	 */
2462
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
M
Ming Lei 已提交
2463
		run_queue = false;
2464 2465
		bypass_insert = false;
		goto insert;
M
Ming Lei 已提交
2466
	}
2467

2468
	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
2469
		goto insert;
2470

2471 2472
	budget_token = blk_mq_get_dispatch_budget(q);
	if (budget_token < 0)
2473
		goto insert;
2474

2475 2476
	blk_mq_set_rq_budget_token(rq, budget_token);

2477
	if (!blk_mq_get_driver_tag(rq)) {
2478
		blk_mq_put_dispatch_budget(q, budget_token);
2479
		goto insert;
2480
	}
2481

2482
	return __blk_mq_issue_directly(hctx, rq, last);
2483 2484 2485 2486
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

2487 2488
	blk_mq_sched_insert_request(rq, false, run_queue, false);

2489 2490 2491
	return BLK_STS_OK;
}

2492 2493 2494 2495 2496 2497 2498 2499 2500 2501
/**
 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
 * @hctx: Pointer of the associated hardware queue.
 * @rq: Pointer to request to be sent.
 *
 * If the device has enough resources to accept a new request now, send the
 * request directly to device driver. Else, insert at hctx->dispatch queue, so
 * we can try send it another time in the future. Requests inserted at this
 * queue have higher priority.
 */
2502
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2503
		struct request *rq)
2504 2505 2506 2507 2508 2509 2510 2511
{
	blk_status_t ret;
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);

2512
	ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
2513
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2514
		blk_mq_request_bypass_insert(rq, false, true);
2515 2516 2517 2518 2519 2520
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);

	hctx_unlock(hctx, srcu_idx);
}

2521
static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2522 2523 2524 2525 2526 2527
{
	blk_status_t ret;
	int srcu_idx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	hctx_lock(hctx, &srcu_idx);
2528
	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
2529
	hctx_unlock(hctx, srcu_idx);
2530 2531

	return ret;
2532 2533
}

2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625
static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx = NULL;
	struct request *rq;
	int queued = 0;
	int errors = 0;

	while ((rq = rq_list_pop(&plug->mq_list))) {
		bool last = rq_list_empty(plug->mq_list);
		blk_status_t ret;

		if (hctx != rq->mq_hctx) {
			if (hctx)
				blk_mq_commit_rqs(hctx, &queued, from_schedule);
			hctx = rq->mq_hctx;
		}

		ret = blk_mq_request_issue_directly(rq, last);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_mq_request_bypass_insert(rq, false, last);
			blk_mq_commit_rqs(hctx, &queued, from_schedule);
			return;
		default:
			blk_mq_end_request(rq, ret);
			errors++;
			break;
		}
	}

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if (errors)
		blk_mq_commit_rqs(hctx, &queued, from_schedule);
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *this_hctx;
	struct blk_mq_ctx *this_ctx;
	unsigned int depth;
	LIST_HEAD(list);

	if (rq_list_empty(plug->mq_list))
		return;
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
		blk_mq_plug_issue_direct(plug, false);
		if (rq_list_empty(plug->mq_list))
			return;
	}

	this_hctx = NULL;
	this_ctx = NULL;
	depth = 0;
	do {
		struct request *rq;

		rq = rq_list_pop(&plug->mq_list);

		if (!this_hctx) {
			this_hctx = rq->mq_hctx;
			this_ctx = rq->mq_ctx;
		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
			trace_block_unplug(this_hctx->queue, depth,
						!from_schedule);
			blk_mq_sched_insert_requests(this_hctx, this_ctx,
						&list, from_schedule);
			depth = 0;
			this_hctx = rq->mq_hctx;
			this_ctx = rq->mq_ctx;

		}

		list_add(&rq->queuelist, &list);
		depth++;
	} while (!rq_list_empty(plug->mq_list));

	if (!list_empty(&list)) {
		trace_block_unplug(this_hctx->queue, depth, !from_schedule);
		blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
						from_schedule);
	}
}

2626 2627 2628
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list)
{
2629
	int queued = 0;
2630
	int errors = 0;
2631

2632
	while (!list_empty(list)) {
2633
		blk_status_t ret;
2634 2635 2636 2637
		struct request *rq = list_first_entry(list, struct request,
				queuelist);

		list_del_init(&rq->queuelist);
2638 2639 2640 2641
		ret = blk_mq_request_issue_directly(rq, list_empty(list));
		if (ret != BLK_STS_OK) {
			if (ret == BLK_STS_RESOURCE ||
					ret == BLK_STS_DEV_RESOURCE) {
2642
				blk_mq_request_bypass_insert(rq, false,
2643
							list_empty(list));
2644 2645 2646
				break;
			}
			blk_mq_end_request(rq, ret);
2647
			errors++;
2648 2649
		} else
			queued++;
2650
	}
J
Jens Axboe 已提交
2651 2652 2653 2654 2655 2656

	/*
	 * If we didn't flush the entire list, we could have told
	 * the driver there was more coming, but that turned out to
	 * be a lie.
	 */
2657 2658
	if ((!list_empty(list) || errors) &&
	     hctx->queue->mq_ops->commit_rqs && queued)
J
Jens Axboe 已提交
2659
		hctx->queue->mq_ops->commit_rqs(hctx);
2660 2661
}

2662
/*
2663
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
2664 2665 2666 2667 2668 2669
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
2670
		return BLK_MAX_REQUEST_COUNT * 2;
2671 2672 2673
	return BLK_MAX_REQUEST_COUNT;
}

2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695
static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

M
Ming Lei 已提交
2696
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2697
				     struct bio *bio, unsigned int nr_segs)
2698 2699
{
	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2700
		if (blk_attempt_plug_merge(q, bio, nr_segs))
2701 2702 2703 2704 2705 2706 2707
			return true;
		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
			return true;
	}
	return false;
}

2708 2709
static struct request *blk_mq_get_new_requests(struct request_queue *q,
					       struct blk_plug *plug,
2710
					       struct bio *bio,
2711
					       unsigned int nsegs)
2712 2713 2714 2715 2716 2717 2718 2719
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.nr_tags	= 1,
		.cmd_flags	= bio->bi_opf,
	};
	struct request *rq;

2720
	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2721
		return NULL;
2722 2723 2724

	rq_qos_throttle(q, bio);

2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737
	if (plug) {
		data.nr_tags = plug->nr_ios;
		plug->nr_ios = 1;
		data.cached_rq = &plug->cached_rq;
	}

	rq = __blk_mq_alloc_requests(&data);
	if (rq)
		return rq;

	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
2738

2739 2740 2741
	return NULL;
}

2742
static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
2743 2744 2745 2746 2747 2748 2749 2750 2751 2752
{
	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
		return false;

	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
		return false;

	return true;
}

2753 2754
static inline struct request *blk_mq_get_request(struct request_queue *q,
						 struct blk_plug *plug,
2755
						 struct bio *bio,
2756
						 unsigned int nsegs)
2757
{
2758 2759 2760
	struct request *rq;
	bool checked = false;

2761 2762
	if (plug) {
		rq = rq_list_peek(&plug->cached_rq);
2763
		if (rq && rq->q == q) {
2764 2765
			if (unlikely(!submit_bio_checks(bio)))
				return NULL;
2766
			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2767
				return NULL;
2768 2769 2770 2771
			checked = true;
			if (!blk_mq_can_use_cached_rq(rq, bio))
				goto fallback;
			rq->cmd_flags = bio->bi_opf;
2772 2773
			plug->cached_rq = rq_list_next(rq);
			INIT_LIST_HEAD(&rq->queuelist);
2774
			rq_qos_throttle(q, bio);
2775 2776 2777 2778
			return rq;
		}
	}

2779 2780 2781
fallback:
	if (unlikely(bio_queue_enter(bio)))
		return NULL;
2782 2783
	if (unlikely(!checked && !submit_bio_checks(bio)))
		goto out_put;
2784
	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
2785 2786 2787 2788 2789
	if (rq)
		return rq;
out_put:
	blk_queue_exit(q);
	return NULL;
2790 2791
}

2792
/**
2793
 * blk_mq_submit_bio - Create and send a request to block device.
2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804
 * @bio: Bio pointer.
 *
 * Builds up a request structure from @q and @bio and send to the device. The
 * request may not be queued directly to hardware if:
 * * This request can be merged with another one
 * * We want to place request at plug queue for possible future merging
 * * There is an IO scheduler active at this queue
 *
 * It will not queue the request if there is an error with the bio, or at the
 * request creation.
 */
2805
void blk_mq_submit_bio(struct bio *bio)
2806
{
2807
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2808
	const int is_sync = op_is_sync(bio->bi_opf);
2809
	struct request *rq;
2810
	struct blk_plug *plug;
2811
	unsigned int nr_segs = 1;
2812
	blk_status_t ret;
2813

2814 2815 2816
	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

2817
	blk_queue_bounce(q, &bio);
2818 2819
	if (blk_may_split(q, bio))
		__blk_queue_split(q, &bio, &nr_segs);
2820

2821
	if (!bio_integrity_prep(bio))
2822
		return;
J
Jens Axboe 已提交
2823

2824
	plug = blk_mq_plug(q, bio);
2825
	rq = blk_mq_get_request(q, plug, bio, nr_segs);
2826
	if (unlikely(!rq))
2827
		return;
J
Jens Axboe 已提交
2828

2829
	trace_block_getrq(bio);
2830

2831
	rq_qos_track(q, rq, bio);
2832

2833 2834
	blk_mq_bio_to_request(rq, bio, nr_segs);

2835 2836 2837 2838 2839
	ret = blk_crypto_init_request(rq);
	if (ret != BLK_STS_OK) {
		bio->bi_status = ret;
		bio_endio(bio);
		blk_mq_free_request(rq);
2840
		return;
2841 2842
	}

2843 2844
	if (op_is_flush(bio->bi_opf)) {
		blk_insert_flush(rq);
2845
		return;
2846
	}
2847

2848
	if (plug)
2849
		blk_add_rq_to_plug(plug, rq);
2850 2851 2852
	else if ((rq->rq_flags & RQF_ELV) ||
		 (rq->mq_hctx->dispatch_busy &&
		  (q->nr_hw_queues == 1 || !is_sync)))
2853
		blk_mq_sched_insert_request(rq, false, true, true);
2854
	else
2855
		blk_mq_try_issue_directly(rq->mq_hctx, rq);
2856 2857
}

2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030
/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 *
 *    Request stacking drivers like request-based dm may change the queue
 *    limits when retrying requests on other queues. Those requests need
 *    to be checked against the new queue limits again during dispatch.
 */
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same,etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;

	if (rq->rq_disk &&
	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051
/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

3052 3053 3054 3055 3056 3057
static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

/* called before freeing request pool in @tags */
3058 3059
static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
				    struct blk_mq_tags *tags)
3060 3061 3062 3063
{
	struct page *page;
	unsigned long flags;

3064 3065 3066 3067
	/* There is no need to clear a driver tags own mapping */
	if (drv_tags == tags)
		return;

3068 3069 3070 3071 3072
	list_for_each_entry(page, &tags->page_list, lru) {
		unsigned long start = (unsigned long)page_address(page);
		unsigned long end = start + order_to_size(page->private);
		int i;

3073
		for (i = 0; i < drv_tags->nr_tags; i++) {
3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093
			struct request *rq = drv_tags->rqs[i];
			unsigned long rq_addr = (unsigned long)rq;

			if (rq_addr >= start && rq_addr < end) {
				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
				cmpxchg(&drv_tags->rqs[i], rq, NULL);
			}
		}
	}

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&drv_tags->lock, flags);
	spin_unlock_irqrestore(&drv_tags->lock, flags);
}

3094 3095
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
3096
{
3097
	struct blk_mq_tags *drv_tags;
3098
	struct page *page;
3099

3100 3101
	if (blk_mq_is_shared_tags(set->flags))
		drv_tags = set->shared_tags;
3102 3103
	else
		drv_tags = set->tags[hctx_idx];
3104

3105
	if (tags->static_rqs && set->ops->exit_request) {
3106
		int i;
3107

3108
		for (i = 0; i < tags->nr_tags; i++) {
J
Jens Axboe 已提交
3109 3110 3111
			struct request *rq = tags->static_rqs[i];

			if (!rq)
3112
				continue;
3113
			set->ops->exit_request(set, rq, hctx_idx);
J
Jens Axboe 已提交
3114
			tags->static_rqs[i] = NULL;
3115
		}
3116 3117
	}

3118
	blk_mq_clear_rq_mapping(drv_tags, tags);
3119

3120 3121
	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
3122
		list_del_init(&page->lru);
3123 3124
		/*
		 * Remove kmemleak object previously allocated in
3125
		 * blk_mq_alloc_rqs().
3126 3127
		 */
		kmemleak_free(page_address(page));
3128 3129
		__free_pages(page, page->private);
	}
3130
}
3131

3132
void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3133
{
3134
	kfree(tags->rqs);
3135
	tags->rqs = NULL;
J
Jens Axboe 已提交
3136 3137
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;
3138

3139
	blk_mq_free_tags(tags);
3140 3141
}

3142 3143 3144
static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					       unsigned int hctx_idx,
					       unsigned int nr_tags,
3145
					       unsigned int reserved_tags)
3146
{
3147
	struct blk_mq_tags *tags;
3148
	int node;
3149

3150
	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
3151 3152 3153
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

3154 3155
	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3156 3157
	if (!tags)
		return NULL;
3158

3159
	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3160
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3161
				 node);
3162
	if (!tags->rqs) {
3163
		blk_mq_free_tags(tags);
3164 3165
		return NULL;
	}
3166

3167 3168 3169
	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
J
Jens Axboe 已提交
3170 3171
	if (!tags->static_rqs) {
		kfree(tags->rqs);
3172
		blk_mq_free_tags(tags);
J
Jens Axboe 已提交
3173 3174 3175
		return NULL;
	}

3176 3177 3178
	return tags;
}

3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

K
Keith Busch 已提交
3190
	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3191 3192 3193
	return 0;
}

3194 3195 3196
static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
			    struct blk_mq_tags *tags,
			    unsigned int hctx_idx, unsigned int depth)
3197 3198 3199
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
3200 3201
	int node;

3202
	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
3203 3204
	if (node == NUMA_NO_NODE)
		node = set->numa_node;
3205 3206 3207

	INIT_LIST_HEAD(&tags->page_list);

3208 3209 3210 3211
	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
3212
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
3213
				cache_line_size());
3214
	left = rq_size * depth;
3215

3216
	for (i = 0; i < depth; ) {
3217 3218 3219 3220 3221
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

3222
		while (this_order && left < order_to_size(this_order - 1))
3223 3224 3225
			this_order--;

		do {
3226
			page = alloc_pages_node(node,
3227
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3228
				this_order);
3229 3230 3231 3232 3233 3234 3235 3236 3237
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
3238
			goto fail;
3239 3240

		page->private = this_order;
3241
		list_add_tail(&page->lru, &tags->page_list);
3242 3243

		p = page_address(page);
3244 3245 3246 3247
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
3248
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3249
		entries_per_page = order_to_size(this_order) / rq_size;
3250
		to_do = min(entries_per_page, depth - i);
3251 3252
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
J
Jens Axboe 已提交
3253 3254 3255
			struct request *rq = p;

			tags->static_rqs[i] = rq;
3256 3257 3258
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
3259 3260
			}

3261 3262 3263 3264
			p += rq_size;
			i++;
		}
	}
3265
	return 0;
3266

3267
fail:
3268 3269
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
3270 3271
}

3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351
struct rq_iter_data {
	struct blk_mq_hw_ctx *hctx;
	bool has_rq;
};

static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
{
	struct rq_iter_data *iter_data = data;

	if (rq->mq_hctx != iter_data->hctx)
		return true;
	iter_data->has_rq = true;
	return false;
}

static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->sched_tags ?
			hctx->sched_tags : hctx->tags;
	struct rq_iter_data data = {
		.hctx	= hctx,
	};

	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
	return data.has_rq;
}

static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
		struct blk_mq_hw_ctx *hctx)
{
	if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
		return false;
	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
		return false;
	return true;
}

static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
		return 0;

	/*
	 * Prevent new request from being allocated on the current hctx.
	 *
	 * The smp_mb__after_atomic() Pairs with the implied barrier in
	 * test_and_set_bit_lock in sbitmap_get().  Ensures the inactive flag is
	 * seen once we return from the tag allocator.
	 */
	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	smp_mb__after_atomic();

	/*
	 * Try to grab a reference to the queue and wait for any outstanding
	 * requests.  If we could not grab a reference the queue has been
	 * frozen and there are no requests.
	 */
	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
		while (blk_mq_hctx_has_requests(hctx))
			msleep(5);
		percpu_ref_put(&hctx->queue->q_usage_counter);
	}

	return 0;
}

static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (cpumask_test_cpu(cpu, hctx->cpumask))
		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	return 0;
}

J
Jens Axboe 已提交
3352 3353 3354 3355 3356
/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
3357
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3358
{
3359
	struct blk_mq_hw_ctx *hctx;
3360 3361
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);
M
Ming Lei 已提交
3362
	enum hctx_type type;
3363

3364
	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3365 3366 3367
	if (!cpumask_test_cpu(cpu, hctx->cpumask))
		return 0;

J
Jens Axboe 已提交
3368
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
M
Ming Lei 已提交
3369
	type = hctx->type;
3370 3371

	spin_lock(&ctx->lock);
M
Ming Lei 已提交
3372 3373
	if (!list_empty(&ctx->rq_lists[type])) {
		list_splice_init(&ctx->rq_lists[type], &tmp);
3374 3375 3376 3377 3378
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
3379
		return 0;
3380

J
Jens Axboe 已提交
3381 3382 3383
	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);
3384 3385

	blk_mq_run_hw_queue(hctx, true);
3386
	return 0;
3387 3388
}

3389
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3390
{
3391 3392 3393
	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
						    &hctx->cpuhp_online);
3394 3395
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
3396 3397
}

3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426
/*
 * Before freeing hw queue, clearing the flush request reference in
 * tags->rqs[] for avoiding potential UAF.
 */
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
		unsigned int queue_depth, struct request *flush_rq)
{
	int i;
	unsigned long flags;

	/* The hw queue may not be mapped yet */
	if (!tags)
		return;

	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);

	for (i = 0; i < queue_depth; i++)
		cmpxchg(&tags->rqs[i], flush_rq, NULL);

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&tags->lock, flags);
	spin_unlock_irqrestore(&tags->lock, flags);
}

3427
/* hctx->ctxs will be freed in queue's release handler */
3428 3429 3430 3431
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
3432 3433
	struct request *flush_rq = hctx->fq->flush_rq;

3434 3435
	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);
3436

3437 3438
	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
			set->queue_depth, flush_rq);
3439
	if (set->ops->exit_request)
3440
		set->ops->exit_request(set, flush_rq, hctx_idx);
3441

3442 3443 3444
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

3445
	blk_mq_remove_cpuhp(hctx);
3446 3447 3448 3449

	spin_lock(&q->unused_hctx_lock);
	list_add(&hctx->hctx_list, &q->unused_hctx_list);
	spin_unlock(&q->unused_hctx_lock);
3450 3451
}

M
Ming Lei 已提交
3452 3453 3454 3455 3456 3457 3458 3459 3460
static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
3461
		blk_mq_debugfs_unregister_hctx(hctx);
3462
		blk_mq_exit_hctx(q, set, hctx, i);
M
Ming Lei 已提交
3463 3464 3465
	}
}

3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479
static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}

3480 3481 3482
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3483
{
3484 3485
	hctx->queue_num = hctx_idx;

3486 3487 3488
	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
				&hctx->cpuhp_online);
3489 3490 3491 3492 3493 3494 3495
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto unregister_cpu_notifier;
3496

3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524
	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
				hctx->numa_node))
		goto exit_hctx;
	return 0;

 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static struct blk_mq_hw_ctx *
blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
		int node)
{
	struct blk_mq_hw_ctx *hctx;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;

	hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
	if (!hctx)
		goto fail_alloc_hctx;

	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
		goto free_hctx;

	atomic_set(&hctx->nr_active, 0);
3525
	if (node == NUMA_NO_NODE)
3526 3527
		node = set->numa_node;
	hctx->numa_node = node;
3528

3529
	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3530 3531 3532
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
3533
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3534

3535 3536
	INIT_LIST_HEAD(&hctx->hctx_list);

3537
	/*
3538 3539
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
3540
	 */
3541
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3542
			gfp, node);
3543
	if (!hctx->ctxs)
3544
		goto free_cpumask;
3545

3546
	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3547
				gfp, node, false, false))
3548 3549
		goto free_ctxs;
	hctx->nr_ctx = 0;
3550

3551
	spin_lock_init(&hctx->dispatch_wait_lock);
3552 3553 3554
	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

3555
	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3556
	if (!hctx->fq)
3557
		goto free_bitmap;
3558

3559
	if (hctx->flags & BLK_MQ_F_BLOCKING)
3560
		init_srcu_struct(hctx->srcu);
3561
	blk_mq_hctx_kobj_init(hctx);
3562

3563
	return hctx;
3564

3565
 free_bitmap:
3566
	sbitmap_free(&hctx->ctx_map);
3567 3568
 free_ctxs:
	kfree(hctx->ctxs);
3569 3570 3571 3572 3573 3574
 free_cpumask:
	free_cpumask_var(hctx->cpumask);
 free_hctx:
	kfree(hctx);
 fail_alloc_hctx:
	return NULL;
3575
}
3576 3577 3578 3579

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
J
Jens Axboe 已提交
3580 3581
	struct blk_mq_tag_set *set = q->tag_set;
	unsigned int i, j;
3582 3583 3584 3585

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;
M
Ming Lei 已提交
3586
		int k;
3587 3588 3589

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
M
Ming Lei 已提交
3590 3591 3592
		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
			INIT_LIST_HEAD(&__ctx->rq_lists[k]);

3593 3594 3595 3596 3597 3598
		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
J
Jens Axboe 已提交
3599 3600 3601
		for (j = 0; j < set->nr_maps; j++) {
			hctx = blk_mq_map_queue_type(q, j, i);
			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3602
				hctx->numa_node = cpu_to_node(i);
J
Jens Axboe 已提交
3603
		}
3604 3605 3606
	}
}

3607 3608 3609
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx,
					     unsigned int depth)
3610
{
3611 3612
	struct blk_mq_tags *tags;
	int ret;
3613

3614
	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3615 3616
	if (!tags)
		return NULL;
3617

3618 3619
	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
	if (ret) {
3620
		blk_mq_free_rq_map(tags);
3621 3622
		return NULL;
	}
3623

3624
	return tags;
3625 3626
}

3627 3628
static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				       int hctx_idx)
3629
{
3630 3631
	if (blk_mq_is_shared_tags(set->flags)) {
		set->tags[hctx_idx] = set->shared_tags;
3632

3633
		return true;
3634
	}
3635

3636 3637 3638 3639
	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
						       set->queue_depth);

	return set->tags[hctx_idx];
3640 3641
}

3642 3643 3644
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx)
3645
{
3646 3647
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
3648
		blk_mq_free_rq_map(tags);
3649
	}
3650 3651
}

3652 3653 3654
static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
				      unsigned int hctx_idx)
{
3655
	if (!blk_mq_is_shared_tags(set->flags))
3656 3657 3658
		blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);

	set->tags[hctx_idx] = NULL;
3659 3660
}

3661
static void blk_mq_map_swqueue(struct request_queue *q)
3662
{
J
Jens Axboe 已提交
3663
	unsigned int i, j, hctx_idx;
3664 3665
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
M
Ming Lei 已提交
3666
	struct blk_mq_tag_set *set = q->tag_set;
3667 3668

	queue_for_each_hw_ctx(q, hctx, i) {
3669
		cpumask_clear(hctx->cpumask);
3670
		hctx->nr_ctx = 0;
3671
		hctx->dispatch_from = NULL;
3672 3673 3674
	}

	/*
3675
	 * Map software to hardware queues.
3676 3677
	 *
	 * If the cpu isn't present, the cpu is mapped to first hctx.
3678
	 */
3679
	for_each_possible_cpu(i) {
3680

3681
		ctx = per_cpu_ptr(q->queue_ctx, i);
J
Jens Axboe 已提交
3682
		for (j = 0; j < set->nr_maps; j++) {
3683 3684 3685
			if (!set->map[j].nr_queues) {
				ctx->hctxs[j] = blk_mq_map_queue_type(q,
						HCTX_TYPE_DEFAULT, i);
3686
				continue;
3687
			}
3688 3689 3690
			hctx_idx = set->map[j].mq_map[i];
			/* unmapped hw queue can be remapped after CPU topo changed */
			if (!set->tags[hctx_idx] &&
			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
				/*
				 * If tags initialization fails for some hctx,
				 * that hctx won't be brought online.  In this
				 * case, remap the current ctx to hctx[0], which
				 * is guaranteed to always have tags allocated.
				 */
				set->map[j].mq_map[i] = 0;
			}

			hctx = blk_mq_map_queue_type(q, j, i);
			ctx->hctxs[j] = hctx;
			/*
			 * If the CPU is already set in the mask, then we've
			 * mapped this one already. This can happen if
			 * devices share queues across queue maps.
			 */
			if (cpumask_test_cpu(i, hctx->cpumask))
				continue;

			cpumask_set_cpu(i, hctx->cpumask);
			hctx->type = j;
			ctx->index_hw[hctx->type] = hctx->nr_ctx;
			hctx->ctxs[hctx->nr_ctx++] = ctx;

			/*
			 * If the nr_ctx type overflows, we have exceeded the
			 * amount of sw queues we can support.
			 */
			BUG_ON(!hctx->nr_ctx);
		}

		for (; j < HCTX_MAX_TYPES; j++)
			ctx->hctxs[j] = blk_mq_map_queue_type(q,
					HCTX_TYPE_DEFAULT, i);
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/* Never unmap queue 0.  We need it as a
			 * fallback in case a new remap fails to
			 * allocate.
			 */
			if (i)
				__blk_mq_free_map_and_rqs(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
		} else {
			blk_mq_tag_idle(hctx);
			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
		}
	}
}

static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
					 bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_shared(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
	INIT_LIST_HEAD(&q->tag_set_list);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	mutex_lock(&set->tag_list_lock);

	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
	if (!list_empty(&set->tag_list) &&
	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_shared(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/* All allocations will be freed in release handler of q->mq_kobj */
static int blk_mq_alloc_ctxs(struct request_queue *q)
{
	struct blk_mq_ctxs *ctxs;
	int cpu;

	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
	if (!ctxs)
		return -ENOMEM;

	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!ctxs->queue_ctx)
		goto fail;

	for_each_possible_cpu(cpu) {
		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
		ctx->ctxs = ctxs;
	}

	q->mq_kobj = &ctxs->kobj;
	q->queue_ctx = ctxs->queue_ctx;

	return 0;
 fail:
	kfree(ctxs);
	return -ENOMEM;
}

/*
 * This is the actual release handler for mq, but we do it from the
 * request queue's release handler to avoid use-after-free headaches;
 * q->mq_kobj shouldn't have been introduced, but we can't group the
 * ctx/hctx kobjects without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx, *next;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));

	/* all hctx are in .unused_hctx_list now */
	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
		list_del_init(&hctx->hctx_list);
		kobject_put(&hctx->kobj);
	}

	kfree(q->queue_hw_ctx);

	/*
	 * release .mq_kobj and sw queue's kobject now because
	 * both share lifetime with request queue.
	 */
	blk_mq_sysfs_deinit(q);
}

static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
		void *queuedata)
{
	struct request_queue *q;
	int ret;

	q = blk_alloc_queue(set->numa_node);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->queuedata = queuedata;
	ret = blk_mq_init_allocated_queue(set, q);
	if (ret) {
		blk_cleanup_queue(q);
		return ERR_PTR(ret);
	}
	return q;
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	return blk_mq_init_queue_data(set, NULL);
}
EXPORT_SYMBOL(blk_mq_init_queue);
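
/*
 * Usage sketch (illustration only, not used anywhere in this file): a driver
 * that has already set up its tag set typically creates the queue like this.
 * "mydev" and its members are hypothetical names.
 *
 *	struct request_queue *q;
 *
 *	q = blk_mq_init_queue(&mydev->tag_set);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	mydev->queue = q;
 *	q->queuedata = mydev;
 */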

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass)
{
	struct request_queue *q;
	struct gendisk *disk;

	q = blk_mq_init_queue_data(set, queuedata);
	if (IS_ERR(q))
		return ERR_CAST(q);

	disk = __alloc_disk_node(q, set->numa_node, lkclass);
	if (!disk) {
		blk_cleanup_queue(q);
		return ERR_PTR(-ENOMEM);
	}
	return disk;
}
EXPORT_SYMBOL(__blk_mq_alloc_disk);
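
/*
 * Note for users: drivers do not normally call __blk_mq_alloc_disk()
 * directly; the blk_mq_alloc_disk() wrapper in <linux/blk-mq.h> supplies the
 * static lock_class_key.  A hedged sketch with a hypothetical "mydev":
 *
 *	struct gendisk *disk;
 *
 *	disk = blk_mq_alloc_disk(&mydev->tag_set, mydev);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	mydev->disk = disk;
 */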

static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
		struct blk_mq_tag_set *set, struct request_queue *q,
		int hctx_idx, int node)
{
	struct blk_mq_hw_ctx *hctx = NULL, *tmp;

	/* reuse dead hctx first */
	spin_lock(&q->unused_hctx_lock);
	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
		if (tmp->numa_node == node) {
			hctx = tmp;
			break;
		}
	}
	if (hctx)
		list_del_init(&hctx->hctx_list);
	spin_unlock(&q->unused_hctx_lock);

	if (!hctx)
		hctx = blk_mq_alloc_hctx(q, set, node);
	if (!hctx)
		goto fail;

	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
		goto free_hctx;

	return hctx;

 free_hctx:
	kobject_put(&hctx->kobj);
 fail:
	return NULL;
}

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
						struct request_queue *q)
{
	int i, j, end;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	if (q->nr_hw_queues < set->nr_hw_queues) {
		struct blk_mq_hw_ctx **new_hctxs;

		new_hctxs = kcalloc_node(set->nr_hw_queues,
				       sizeof(*new_hctxs), GFP_KERNEL,
				       set->numa_node);
		if (!new_hctxs)
			return;
		if (hctxs)
			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
			       sizeof(*hctxs));
		q->queue_hw_ctx = new_hctxs;
		kfree(hctxs);
		hctxs = new_hctxs;
	}

	/* protect against switching io scheduler  */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;
		struct blk_mq_hw_ctx *hctx;

		node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
		/*
		 * If the hw queue has been mapped to another numa node,
		 * we need to realloc the hctx. If allocation fails, fallback
		 * to use the previous one.
		 */
		if (hctxs[i] && (hctxs[i]->numa_node == node))
			continue;

		hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
		if (hctx) {
			if (hctxs[i])
				blk_mq_exit_hctx(q, set, hctxs[i], i);
			hctxs[i] = hctx;
		} else {
			if (hctxs[i])
				pr_warn("Allocate new hctx on node %d fails,\
						fallback to previous one on node %d\n",
						node, hctxs[i]->numa_node);
			else
				break;
		}
	}
	/*
	 * Increasing nr_hw_queues fails. Free the newly allocated
	 * hctxs and keep the previous q->nr_hw_queues.
	 */
	if (i != set->nr_hw_queues) {
		j = q->nr_hw_queues;
		end = i;
	} else {
		j = i;
		end = q->nr_hw_queues;
		q->nr_hw_queues = set->nr_hw_queues;
	}

	for (; j < end; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			blk_mq_exit_hctx(q, set, hctx, j);
			hctxs[j] = NULL;
		}
	}
	mutex_unlock(&q->sysfs_lock);
}

int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	if (blk_mq_alloc_ctxs(q))
		goto err_poll;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	INIT_LIST_HEAD(&q->unused_hctx_list);
	spin_lock_init(&q->unused_hctx_lock);

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->tag_set = set;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
	if (set->nr_maps > HCTX_TYPE_POLL &&
	    set->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = BLK_MQ_POLL_CLASSIC;

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);
	return 0;

err_hctxs:
	kfree(q->queue_hw_ctx);
	q->nr_hw_queues = 0;
	blk_mq_sysfs_deinit(q);
err_poll:
	blk_stat_free_callback(q->poll_cb);
	q->poll_cb = NULL;
err_exit:
	q->mq_ops = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
	blk_mq_del_queue_tag_set(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	if (blk_mq_is_shared_tags(set->flags)) {
		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						set->queue_depth);
		if (!set->shared_tags)
			return -ENOMEM;
	}

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (!__blk_mq_alloc_map_and_rqs(set, i))
			goto out_unwind;
		cond_resched();
	}

	return 0;

out_unwind:
	while (--i >= 0)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}
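
/*
 * Worked example of the retry loop above (illustrative numbers, not tied to
 * any particular driver): a tag set asking for queue_depth 1024 whose request
 * allocations fail is retried at 512, 256, ... until an allocation succeeds
 * or the depth would drop below set->reserved_tags + BLK_MQ_TAG_MIN, at which
 * point -ENOMEM is returned and the reduced depth is logged.
 */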

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	/*
	 * blk_mq_map_queues() and multiple .map_queues() implementations
	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
	 * number of hardware queues.
	 */
	if (set->nr_maps == 1)
		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

	if (set->ops->map_queues && !is_kdump_kernel()) {
		int i;

		/*
		 * transport .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 * 	mask = get_cpu_mask(queue)
		 * 	for_each_cpu(cpu, mask)
		 * 		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared to
		 * kill stale mappings, since one CPU may not be mapped
		 * to any hw queue.
		 */
		for (i = 0; i < set->nr_maps; i++)
			blk_mq_clear_mq_map(&set->map[i]);

		return set->ops->map_queues(set);
	} else {
		BUG_ON(set->nr_maps > 1);
		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	}
}

static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
				  int cur_nr_hw_queues, int new_nr_hw_queues)
{
	struct blk_mq_tags **new_tags;

	if (cur_nr_hw_queues >= new_nr_hw_queues)
		return 0;

	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
				GFP_KERNEL, set->numa_node);
	if (!new_tags)
		return -ENOMEM;

	if (set->tags)
		memcpy(new_tags, set->tags, cur_nr_hw_queues *
		       sizeof(*set->tags));
	kfree(set->tags);
	set->tags = new_tags;
	set->nr_hw_queues = new_nr_hw_queues;

	return 0;
}

static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
				int new_nr_hw_queues)
{
	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int i, ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	if (!set->nr_maps)
		set->nr_maps = 1;
	else if (set->nr_maps > HCTX_MAX_TYPES)
		return -EINVAL;

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->nr_maps = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map
	 */
	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
		return -ENOMEM;

	ret = -ENOMEM;
	for (i = 0; i < set->nr_maps; i++) {
		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
						  sizeof(set->map[i].mq_map[0]),
						  GFP_KERNEL, set->numa_node);
		if (!set->map[i].mq_map)
			goto out_free_mq_map;
		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
	}

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_set_map_and_rqs(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	for (i = 0; i < set->nr_maps; i++) {
		kfree(set->map[i].mq_map);
		set->map[i].mq_map = NULL;
	}
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
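
/*
 * A hedged usage sketch (not part of this file): the fields below are the
 * main ones blk_mq_alloc_tag_set() consumes.  "mydev", "mydev_cmd" and
 * "mydev_mq_ops" are hypothetical driver names.
 *
 *	memset(&mydev->tag_set, 0, sizeof(mydev->tag_set));
 *	mydev->tag_set.ops = &mydev_mq_ops;
 *	mydev->tag_set.nr_hw_queues = num_online_cpus();
 *	mydev->tag_set.queue_depth = 128;
 *	mydev->tag_set.numa_node = NUMA_NO_NODE;
 *	mydev->tag_set.cmd_size = sizeof(struct mydev_cmd);
 *	mydev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *	mydev->tag_set.driver_data = mydev;
 *
 *	err = blk_mq_alloc_tag_set(&mydev->tag_set);
 *	if (err)
 *		return err;
 *
 * Teardown runs in the opposite order: destroy every queue created from the
 * set first, then call blk_mq_free_tag_set().
 */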

/* allocate and initialize a tagset for a simple single-queue device */
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->nr_maps = 1;
	set->queue_depth = queue_depth;
	set->numa_node = NUMA_NO_NODE;
	set->flags = set_flags;
	return blk_mq_alloc_tag_set(set);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
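
/*
 * Equivalent sketch using the single-queue helper above (hypothetical names
 * again); it zeroes the set and fills in the same fields as the manual
 * setup, minus cmd_size and NUMA placement:
 *
 *	err = blk_mq_alloc_sq_tag_set(&mydev->tag_set, &mydev_mq_ops, 128,
 *				      BLK_MQ_F_SHOULD_MERGE);
 *	if (err)
 *		return err;
 */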

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i, j;

	for (i = 0; i < set->nr_hw_queues; i++)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	for (j = 0; j < set->nr_maps; j++) {
		kfree(set->map[j].mq_map);
		set->map[j].mq_map = NULL;
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	if (q->nr_requests == nr)
		return 0;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		}
		if (ret)
			break;
		if (q->elevator && q->elevator->type->ops.depth_updated)
			q->elevator->type->ops.depth_updated(hctx);
	}
	if (!ret) {
		q->nr_requests = nr;
		if (blk_mq_is_shared_tags(set->flags)) {
			if (q->elevator)
				blk_mq_tag_update_sched_shared_tags(q);
			else
				blk_mq_tag_resize_shared_tags(set, nr);
		}
	}

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in qe pair list and switch the
 * io scheduler to 'none'
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release. The reference of the io scheduler
	 * module taken by elevator_get will also be put. So we need to take
	 * a reference of the io scheduler module here to prevent it from
	 * being removed.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static void blk_mq_elv_switch_back(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t = NULL;

	list_for_each_entry(qe, head, node)
		if (qe->q == q) {
			t = qe->type;
			break;
		}

	if (!t)
		return;

	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			int i = prev_nr_hw_queues;

			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				__blk_mq_free_map_and_rqs(set, i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
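
/*
 * A hedged usage sketch: a driver that renegotiates its hardware channels
 * (for example after a controller reset changes the number of usable
 * interrupt vectors) would resize its queue count like this; "mydev" is a
 * hypothetical driver structure:
 *
 *	blk_mq_update_nr_hw_queues(&mydev->tag_set, mydev->nr_channels);
 *
 * The helper freezes every queue sharing the tag set while it remaps them,
 * so callers must tolerate those queues being frozen for the duration.
 */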

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (q->poll_stat)
		return true;

	return blk_stats_alloc_enable(q);
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
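
/*
 * Worked example of the estimate above (illustrative numbers only): if the
 * tracked mean completion time for this request's direction/size bucket is
 * 20000 ns, hybrid polling sleeps for (20000 + 1) / 2 ~= 10000 ns before it
 * falls back to busy polling.
 */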

static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
	struct request *rq = blk_qc_to_rq(hctx, qc);
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	/*
	 * If a request has completed on queue that uses an I/O scheduler, we
	 * won't get back a request from blk_qc_to_rq.
	 */
	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);

	/*
	 * If we sleep, have the caller restart the poll loop to reset the
	 * state.  Like for the other success return cases, the caller is
	 * responsible for checking if the IO completed.  If the IO isn't
	 * complete, we'll get called again and will go straight to the busy
	 * poll loop.
	 */
	return true;
}

static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (task_is_running(current))
			return 1;

		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}


static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);