// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

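/*
 * Map a request to a completion-poll statistics bucket: buckets are indexed
 * by data direction and ilog2() of the request size in sectors, clamped to
 * the BLK_MQ_POLL_STATS_BKTS range.
 */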
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

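/*
 * Poll cookie layout: the hardware queue number lives above BLK_QC_T_SHIFT,
 * the tag sits in the low bits, and BLK_QC_T_INTERNAL flags a scheduler
 * (internal) tag rather than a driver tag.
 */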
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return xa_load(&q->hctx_table,
			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

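/* Count in-flight requests per data direction for the given partition. */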
static bool blk_mq_check_inflight(struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

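/*
 * Start freezing the queue: kill the percpu usage counter on the first
 * freeze so new submitters block, and run the hardware queues so that
 * requests already queued can drain.
 */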
void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is the driver's responsibility to ensure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback function from being invoked. Once this function returns, we
 * make sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers queue into the state before quiescing
 * which is done by blk_mq_quiesce_queue.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

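/*
 * Initialize a request freshly taken from @tags: fill in the queue, ctx and
 * hctx pointers and flags, and record either a driver tag or a scheduler
 * (internal) tag depending on whether an elevator is attached.
 */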
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

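/*
 * Allocate a batch of tags in one sbitmap operation, initialize one request
 * per allocated tag and stash them on data->cached_rq; the first request is
 * popped off the list and returned to the caller.
 */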
static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that threads using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

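/*
 * Final request teardown: return the driver and scheduler tags, give the
 * scheduler a chance to restart the hardware queue and drop the queue
 * usage reference taken at allocation time.
 */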
static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	req->bio = NULL;
	req->__data_len = 0;
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *      except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET))) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (rq->bio)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

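/*
 * Drain a per-CPU llist of completed requests and invoke the driver's
 * ->complete() handler for each of them, in the order they were added.
 */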
static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

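/*
 * Decide whether a completion should be redirected to the submitting CPU
 * via IPI instead of being handled on the current CPU.
 */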
static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		u64 start_time;
#ifdef CONFIG_BLK_CGROUP
		if (rq->bio)
			start_time = bio_issue_time(&rq->bio->bi_issue);
		else
#endif
			start_time = ktime_get_ns();
		rq->io_start_time_ns = start_time;
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
	        WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
				hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP, rq has contained some driver specific
		 * data, so insert it to hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv,
			       bool reserved)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
1466
	 * blk_freeze_queue_start, and the moment the last request is
1467 1468 1469 1470
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
1471 1472
		return;

K
1474

K
		mod_timer(&q->timeout, next);
1477
	} else {
1478 1479 1480 1481 1482 1483
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
1484 1485 1486 1487 1488
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
1489
	}
1490
	blk_queue_exit(q);
1491 1492
}

1493 1494 1495 1496 1497 1498 1499 1500 1501 1502
struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
M
1504 1505

	spin_lock(&ctx->lock);
M
1507
	sbitmap_clear_bit(sb, bitnr);
1508 1509 1510 1511
	spin_unlock(&ctx->lock);
	return true;
}

1512 1513 1514 1515
/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch
 */
1516
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1517
{
1518 1519 1520 1521
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};
1522

1523
	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1524
}
1525
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1526

1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
M
1539 1540

	spin_lock(&ctx->lock);
M
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1543
		list_del_init(&dispatch_data->rq->queuelist);
M
1545 1546 1547 1548 1549 1550 1551 1552 1553 1554
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
1555
	unsigned off = start ? start->index_hw[hctx->type] : 0;
1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

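/*
 * Allocate a driver tag for a request that so far only holds a scheduler
 * tag, using the reserved pool if the scheduler tag was reserved.
 */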
static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA):
 * - EWMA is one simple way to compute running average value
 * - weight(7/8 and 1/8) is applied so that it can decrease exponentially
 * - take 4 as factor for avoiding to get too small(0) result, and this
 *   factor doesn't matter because EWMA decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	int budget_token = -1;

	if (need_budget) {
		budget_token = blk_mq_get_dispatch_budget(rq->q);
		if (budget_token < 0) {
			blk_mq_put_driver_tag(rq);
			return PREP_DISPATCH_NO_BUDGET;
		}
		blk_mq_set_rq_budget_token(rq, budget_token);
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * All budgets not got from this function will be put
			 * together during handling partial dispatch
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q, budget_token);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		struct list_head *list)
{
	struct request *rq;

	list_for_each_entry(rq, list, queuelist) {
		int budget_token = blk_mq_get_rq_budget_token(rq);

		if (budget_token >= 0)
			blk_mq_put_dispatch_budget(q, budget_token);
	}
}

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);
	bool needs_resource = false;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * once the request is queued to lld, no need to cover the
		 * budget any more
		 */
		if (nr_budgets)
			nr_budgets--;
1863
		ret = q->mq_ops->queue_rq(hctx, &bd);
1864 1865 1866
		switch (ret) {
		case BLK_STS_OK:
			queued++;
1867
			break;
1868
		case BLK_STS_RESOURCE:
1869 1870
			needs_resource = true;
			fallthrough;
1871 1872 1873 1874
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
1875 1876 1877 1878 1879 1880
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
1881
			needs_resource = true;
1882 1883
			break;
		default:
1884
			errors++;
1885
			blk_mq_end_request(rq, ret);
1886
		}
1887
	} while (!list_empty(list));
1888
out:
1889 1890 1891
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

1892 1893 1894 1895 1896
	/* If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
1897 1898 1899 1900
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
1901
	if (!list_empty(list)) {
1902
		bool needs_restart;
1903 1904
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
1905
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
1906

1907 1908
		if (nr_budgets)
			blk_mq_release_budgets(q, list);
1909

1910
		spin_lock(&hctx->lock);
1911
		list_splice_tail_init(list, &hctx->dispatch);
1912
		spin_unlock(&hctx->lock);
1913

1914 1915 1916 1917 1918 1919 1920 1921 1922
		/*
		 * Order adding requests to hctx->dispatch and checking
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(). Avoid restart code path to
		 * miss the new added requests to hctx->dispatch, meantime
		 * SCHED_RESTART is observed here.
		 */
		smp_mb();

1923
		/*
1924 1925 1926
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
1927
		 *
1928 1929 1930 1931
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
1932
		 *
1933 1934 1935 1936 1937 1938 1939
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
1940
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1941
		 *   and dm-rq.
1942 1943 1944
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
1945
		 * that could otherwise occur if the queue is idle.  We'll do
1946 1947
		 * similar if we couldn't get budget or couldn't lock a zone
		 * and SCHED_RESTART is set.
1948
		 */
1949
		needs_restart = blk_mq_sched_needs_restart(hctx);
1950 1951
		if (prep == PREP_DISPATCH_NO_BUDGET)
			needs_resource = true;
1952
		if (!needs_restart ||
1953
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
1954
			blk_mq_run_hw_queue(hctx, true);
1955
		else if (needs_restart && needs_resource)
1956
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
1957

1958
		blk_mq_update_dispatch_busy(hctx, true);
1959
		return false;
1960 1961
	} else
		blk_mq_update_dispatch_busy(hctx, false);
1962

1963
	return (queued + errors) != 0;
1964 1965
}

/**
 * __blk_mq_run_hw_queue - Run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 *
 * Send pending requests to the hardware.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	blk_mq_run_dispatch_ops(hctx->queue,
			blk_mq_sched_dispatch_requests(hctx));
}

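/*
 * Pick the first online CPU that is mapped to this hardware queue; fall back
 * to the first possible CPU if none of the mapped CPUs are online.
 */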
static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do unbound schedule if we can't find an online CPU for this hctx,
	 * and it should only happen in the path of handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
 */
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	bool need_run;

	/*
	 * When the queue is quiesced, we may be switching the io scheduler,
	 * updating nr_hw_queues, or doing other things, and we can't run the
	 * queue any more; even __blk_mq_hctx_has_pending() can't be called
	 * safely.
	 *
	 * And the queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	__blk_mq_run_dispatch_ops(hctx->queue, false,
		need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx));

	if (need_run)
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

/*
 * Is the request queue handled by an IO scheduler that does not respect
 * hardware queues when dispatching?
 */
static bool blk_mq_has_sqsched(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.dispatch_request &&
	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
		return true;
	return false;
}

/*
 * Return preferred queue to dispatch from (if any) for non-mq aware IO
 * scheduler.
 */
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;

	/*
	 * If the IO scheduler does not respect hardware queues when
	 * dispatching, we just don't bother with multiple HW queues and
	 * dispatch from hctx for the current CPU since running multiple queues
	 * just causes lock contention inside the scheduler and pointless cache
	 * bouncing.
	 */
	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
				     raw_smp_processor_id());
	if (!blk_mq_hctx_stopped(hctx))
		return hctx;
	return NULL;
}

/**
 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
 * @q: Pointer to the request queue to run.
 * @async: If we want to run the queue asynchronously.
 */
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx, *sq_hctx;
	unsigned long i;

	sq_hctx = NULL;
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;
		/*
		 * Dispatch from this hctx either if there's no hctx preferred
		 * by IO scheduler or if it has requests that bypass the
		 * scheduler.
		 */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
 * @q: Pointer to the request queue to run.
 * @msecs: Milliseconds of delay to wait before running the queues.
 */
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
	struct blk_mq_hw_ctx *hctx, *sq_hctx;
	unsigned long i;

	sq_hctx = NULL;
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;
		/*
		 * If there is already a run_work pending, leave the
		 * pending delay untouched. Otherwise, a hctx can stall
		 * if another hctx is re-delaying the other's work
		 * before the work executes.
		 */
		if (delayed_work_pending(&hctx->run_work))
			continue;
		/*
		 * Dispatch from this hctx either if there's no hctx preferred
		 * by IO scheduler or if it has requests that bypass the
		 * scheduler.
		 */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_delay_run_hw_queue(hctx, msecs);
	}
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, and
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, and
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue.
	 */
	if (blk_mq_hctx_stopped(hctx))
		return;

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_lists[type]);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/**
 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
 * @rq: Pointer to request to be inserted.
 * @at_head: true if the request should be inserted at the head of the list.
 * @run_queue: If we should run the hardware queue after inserting the request.
 *
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&hctx->lock);
	if (at_head)
		list_add(&rq->queuelist, &hctx->dispatch);
	else
		list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	list_for_each_entry(rq, list, queuelist) {
		BUG_ON(rq->mq_ctx != ctx);
		trace_block_rq_insert(rq);
	}

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

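/*
 * Tell the driver that no more requests will follow for now (if it
 * implements ->commit_rqs()) and reset the per-plug queued counter.
 */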
static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
			      bool from_schedule)
{
	if (hctx->queue->mq_ops->commit_rqs) {
		trace_block_unplug(hctx->queue, *queued, !from_schedule);
		hctx->queue->mq_ops->commit_rqs(hctx);
	}
	*queued = 0;
}

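/*
 * Fill a freshly allocated request with the data carried by @bio and start
 * I/O accounting for it.
 */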
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	int err;

	if (bio->bi_opf & REQ_RAHEAD)
		rq->cmd_flags |= REQ_FAILFAST_MASK;

	rq->__sector = bio->bi_iter.bi_sector;
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
	WARN_ON_ONCE(err);

	blk_account_io_start(rq);
}

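/*
 * Hand a single request to the driver's ->queue_rq() and translate the
 * result into dispatch-busy accounting, requeueing the request on a
 * resource shortage.
 */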
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq, bool last)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = last,
	};
	blk_status_t ret;

	/*
	 * For OK queue, we are done. For error, caller may kill it.
	 * Any other error (busy), just add it to our list as we
	 * previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		blk_mq_update_dispatch_busy(hctx, false);
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		blk_mq_update_dispatch_busy(hctx, true);
		__blk_mq_requeue_request(rq);
		break;
	default:
		blk_mq_update_dispatch_busy(hctx, false);
		break;
	}

	return ret;
}

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						bool bypass_insert, bool last)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;
	int budget_token;

	/*
	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
	 * and avoid the driver trying to dispatch again.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		bypass_insert = false;
		goto insert;
	}

	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
		goto insert;

	budget_token = blk_mq_get_dispatch_budget(q);
	if (budget_token < 0)
		goto insert;

	blk_mq_set_rq_budget_token(rq, budget_token);

	if (!blk_mq_get_driver_tag(rq)) {
		blk_mq_put_dispatch_budget(q, budget_token);
		goto insert;
	}

	return __blk_mq_issue_directly(hctx, rq, last);
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	blk_mq_sched_insert_request(rq, false, run_queue, false);

	return BLK_STS_OK;
}

/**
 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
 * @hctx: Pointer of the associated hardware queue.
 * @rq: Pointer to request to be sent.
 *
 * If the device has enough resources to accept a new request now, send the
 * request directly to device driver. Else, insert at hctx->dispatch queue, so
 * we can try to send it another time in the future. Requests inserted at this
 * queue have higher priority.
 */
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	blk_status_t ret =
		__blk_mq_try_issue_directly(hctx, rq, false, true);

	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		blk_mq_request_bypass_insert(rq, false, true);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);
}

static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
}

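/*
 * Issue the requests on a plug list directly to their hardware queues,
 * committing the outstanding batch whenever the hardware queue changes or
 * the driver reports a resource shortage.
 */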
static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx = NULL;
	struct request *rq;
	int queued = 0;
	int errors = 0;

	while ((rq = rq_list_pop(&plug->mq_list))) {
		bool last = rq_list_empty(plug->mq_list);
		blk_status_t ret;

		if (hctx != rq->mq_hctx) {
			if (hctx)
				blk_mq_commit_rqs(hctx, &queued, from_schedule);
			hctx = rq->mq_hctx;
		}

		ret = blk_mq_request_issue_directly(rq, last);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_mq_request_bypass_insert(rq, false, last);
			blk_mq_commit_rqs(hctx, &queued, from_schedule);
			return;
		default:
			blk_mq_end_request(rq, ret);
			errors++;
			break;
		}
	}

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if (errors)
		blk_mq_commit_rqs(hctx, &queued, from_schedule);
}

static void __blk_mq_flush_plug_list(struct request_queue *q,
				     struct blk_plug *plug)
{
	if (blk_queue_quiesced(q))
		return;
	q->mq_ops->queue_rqs(&plug->mq_list);
}

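/*
 * Pop requests that share the first request's software and hardware queue
 * off the plug list and insert them into the scheduler as one batch;
 * requests for other queues stay on the plug list for the next round.
 */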
static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
{
	struct blk_mq_hw_ctx *this_hctx = NULL;
	struct blk_mq_ctx *this_ctx = NULL;
	struct request *requeue_list = NULL;
	unsigned int depth = 0;
	LIST_HEAD(list);

	do {
		struct request *rq = rq_list_pop(&plug->mq_list);

		if (!this_hctx) {
			this_hctx = rq->mq_hctx;
			this_ctx = rq->mq_ctx;
		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
			rq_list_add(&requeue_list, rq);
			continue;
		}
		list_add_tail(&rq->queuelist, &list);
		depth++;
	} while (!rq_list_empty(plug->mq_list));

	plug->mq_list = requeue_list;
	trace_block_unplug(this_hctx->queue, depth, !from_sched);
	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request *rq;

	if (rq_list_empty(plug->mq_list))
		return;
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
		struct request_queue *q;

		rq = rq_list_peek(&plug->mq_list);
		q = rq->q;

		/*
		 * Peek first request and see if we have a ->queue_rqs() hook.
		 * If we do, we can dispatch the whole plug list in one go. We
		 * already know at this point that all requests belong to the
		 * same queue, caller must ensure that's the case.
		 *
		 * Since we pass off the full list to the driver at this point,
		 * we do not increment the active request count for the queue.
		 * Bypass shared tags for now because of that.
		 */
		if (q->mq_ops->queue_rqs &&
		    !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
			blk_mq_run_dispatch_ops(q,
				__blk_mq_flush_plug_list(q, plug));
			if (rq_list_empty(plug->mq_list))
				return;
		}

		blk_mq_run_dispatch_ops(q,
				blk_mq_plug_issue_direct(plug, false));
		if (rq_list_empty(plug->mq_list))
			return;
	}

	do {
		blk_mq_dispatch_plug_list(plug, from_schedule);
	} while (!rq_list_empty(plug->mq_list));
}

void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list)
{
	int queued = 0;
	int errors = 0;

	while (!list_empty(list)) {
		blk_status_t ret;
		struct request *rq = list_first_entry(list, struct request,
				queuelist);

		list_del_init(&rq->queuelist);
		ret = blk_mq_request_issue_directly(rq, list_empty(list));
		if (ret != BLK_STS_OK) {
			if (ret == BLK_STS_RESOURCE ||
					ret == BLK_STS_DEV_RESOURCE) {
				blk_mq_request_bypass_insert(rq, false,
							list_empty(list));
				break;
			}
			blk_mq_end_request(rq, ret);
			errors++;
		} else
			queued++;
	}

	/*
	 * If we didn't flush the entire list, we could have told
	 * the driver there was more coming, but that turned out to
	 * be a lie.
	 */
	if ((!list_empty(list) || errors) &&
	     hctx->queue->mq_ops->commit_rqs && queued)
		hctx->queue->mq_ops->commit_rqs(hctx);
}

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

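/*
 * Queue a request on the current task's plug, flushing the plug first when
 * it has grown beyond the per-plug limits.
 */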
static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

static bool blk_mq_attempt_bio_merge(struct request_queue *q,
				     struct bio *bio, unsigned int nr_segs)
{
	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
		if (blk_attempt_plug_merge(q, bio, nr_segs))
			return true;
		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
			return true;
	}
	return false;
}

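/*
 * Allocate a new request for @bio, trying bio merging and applying rq_qos
 * throttling first; on allocation failure, REQ_NOWAIT bios are ended with a
 * would-block error.
 */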
static struct request *blk_mq_get_new_requests(struct request_queue *q,
					       struct blk_plug *plug,
					       struct bio *bio,
					       unsigned int nsegs)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.nr_tags	= 1,
		.cmd_flags	= bio->bi_opf,
	};
	struct request *rq;

	if (unlikely(bio_queue_enter(bio)))
		return NULL;

	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
		goto queue_exit;

	rq_qos_throttle(q, bio);

	if (plug) {
		data.nr_tags = plug->nr_ios;
		plug->nr_ios = 1;
		data.cached_rq = &plug->cached_rq;
	}

	rq = __blk_mq_alloc_requests(&data);
	if (rq)
		return rq;
	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
queue_exit:
	blk_queue_exit(q);
	return NULL;
}

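/*
 * Try to reuse a request cached on the plug by an earlier batched
 * allocation; only a request that matches the queue, hctx type and flush
 * semantics of the new bio can be reused.
 */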
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
{
	struct request *rq;

	if (!plug)
		return NULL;
	rq = rq_list_peek(&plug->cached_rq);
	if (!rq || rq->q != q)
		return NULL;

	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
		*bio = NULL;
		return NULL;
	}

	rq_qos_throttle(q, *bio);

	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
		return NULL;
	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
		return NULL;

	rq->cmd_flags = (*bio)->bi_opf;
	plug->cached_rq = rq_list_next(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

/**
 * blk_mq_submit_bio - Create and send a request to block device.
 * @bio: Bio pointer.
 *
 * Builds up a request structure from @q and @bio and sends it to the device.
 * The request may not be queued directly to hardware if:
 * * This request can be merged with another one
 * * We want to place request at plug queue for possible future merging
 * * There is an IO scheduler active at this queue
 *
 * It will not queue the request if there is an error with the bio, or at the
 * request creation.
 */
void blk_mq_submit_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blk_plug *plug = blk_mq_plug(q, bio);
	const int is_sync = op_is_sync(bio->bi_opf);
	struct request *rq;
	unsigned int nr_segs = 1;
	blk_status_t ret;

	blk_queue_bounce(q, &bio);
	if (blk_may_split(q, bio))
		__blk_queue_split(q, &bio, &nr_segs);

	if (!bio_integrity_prep(bio))
		return;

	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
	if (!rq) {
		if (!bio)
			return;
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}

	trace_block_getrq(bio);

	rq_qos_track(q, rq, bio);

	blk_mq_bio_to_request(rq, bio, nr_segs);

	ret = blk_crypto_init_request(rq);
	if (ret != BLK_STS_OK) {
		bio->bi_status = ret;
		bio_endio(bio);
		blk_mq_free_request(rq);
		return;
	}

	if (op_is_flush(bio->bi_opf)) {
		blk_insert_flush(rq);
		return;
	}

	if (plug)
		blk_add_rq_to_plug(plug, rq);
	else if ((rq->rq_flags & RQF_ELV) ||
		 (rq->mq_hctx->dispatch_busy &&
		  (q->nr_hw_queues == 1 || !is_sync)))
		blk_mq_sched_insert_request(rq, false, true, true);
	else
		blk_mq_run_dispatch_ops(rq->q,
				blk_mq_try_issue_directly(rq->mq_hctx, rq));
}

#ifdef CONFIG_BLK_MQ_STACKING
/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	blk_status_t ret;

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same, etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	blk_mq_run_dispatch_ops(q,
			ret = blk_mq_request_issue_directly(rq, true));
	if (ret)
		blk_account_io_done(rq, ktime_get_ns());
	return ret;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point to the same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
				      bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
#endif /* CONFIG_BLK_MQ_STACKING */

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

/* called before freeing request pool in @tags */
static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
				    struct blk_mq_tags *tags)
{
	struct page *page;
	unsigned long flags;

	/* There is no need to clear the driver tags' own mapping */
	if (drv_tags == tags)
		return;

	list_for_each_entry(page, &tags->page_list, lru) {
		unsigned long start = (unsigned long)page_address(page);
		unsigned long end = start + order_to_size(page->private);
		int i;

		for (i = 0; i < drv_tags->nr_tags; i++) {
			struct request *rq = drv_tags->rqs[i];
			unsigned long rq_addr = (unsigned long)rq;

			if (rq_addr >= start && rq_addr < end) {
				WARN_ON_ONCE(req_ref_read(rq) != 0);
				cmpxchg(&drv_tags->rqs[i], rq, NULL);
			}
		}
	}

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&drv_tags->lock, flags);
	spin_unlock_irqrestore(&drv_tags->lock, flags);
}

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct blk_mq_tags *drv_tags;
	struct page *page;

	if (list_empty(&tags->page_list))
		return;

	if (blk_mq_is_shared_tags(set->flags))
		drv_tags = set->shared_tags;
	else
		drv_tags = set->tags[hctx_idx];

	if (tags->static_rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	blk_mq_clear_rq_mapping(drv_tags, tags);

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_alloc_rqs().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

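/*
 * Map a hardware queue index to the queue map type (default, read, poll)
 * whose range of hardware queues contains that index.
 */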
static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	int i;

	for (i = 0; i < set->nr_maps; i++) {
		unsigned int start = set->map[i].queue_offset;
		unsigned int end = start + set->map[i].nr_queues;

		if (hctx_idx >= start && hctx_idx < end)
			break;
	}

	if (i >= set->nr_maps)
		i = HCTX_TYPE_DEFAULT;

	return i;
}

static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	enum hctx_type type = hctx_idx_to_type(set, hctx_idx);

	return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
}

static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					       unsigned int hctx_idx,
					       unsigned int nr_tags,
					       unsigned int reserved_tags)
{
	int node = blk_mq_get_hctx_node(set, hctx_idx);
	struct blk_mq_tags *tags;

	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	return 0;
}

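/*
 * Allocate the static requests backing @tags in bulk: whole pages are
 * allocated and carved into cacheline-aligned request slots, so one page
 * allocation can back many requests.
 */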
static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
			    struct blk_mq_tags *tags,
			    unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	int node = blk_mq_get_hctx_node(set, hctx_idx);
	size_t rq_size, left;

	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}

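/*
 * Helpers used during CPU hotplug to check whether a hardware queue still
 * has requests in flight before the last online CPU mapped to it goes away.
 */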
struct rq_iter_data {
	struct blk_mq_hw_ctx *hctx;
	bool has_rq;
};

static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
{
	struct rq_iter_data *iter_data = data;

	if (rq->mq_hctx != iter_data->hctx)
		return true;
	iter_data->has_rq = true;
	return false;
}

static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->sched_tags ?
			hctx->sched_tags : hctx->tags;
	struct rq_iter_data data = {
		.hctx	= hctx,
	};

	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
	return data.has_rq;
}

static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
		struct blk_mq_hw_ctx *hctx)
{
	if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
		return false;
	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
		return false;
	return true;
}

static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
		return 0;

	/*
	 * Prevent new request from being allocated on the current hctx.
	 *
	 * The smp_mb__after_atomic() Pairs with the implied barrier in
	 * test_and_set_bit_lock in sbitmap_get().  Ensures the inactive flag is
	 * seen once we return from the tag allocator.
	 */
	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	smp_mb__after_atomic();

	/*
	 * Try to grab a reference to the queue and wait for any outstanding
	 * requests.  If we could not grab a reference the queue has been
	 * frozen and there are no requests.
	 */
	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
		while (blk_mq_hctx_has_requests(hctx))
			msleep(5);
		percpu_ref_put(&hctx->queue->q_usage_counter);
	}

	return 0;
}

static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (cpumask_test_cpu(cpu, hctx->cpumask))
		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	return 0;
}

/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);
	enum hctx_type type;

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	if (!cpumask_test_cpu(cpu, hctx->cpumask))
		return 0;

	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
	type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		list_splice_init(&ctx->rq_lists[type], &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
						    &hctx->cpuhp_online);
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/*
 * Before freeing hw queue, clearing the flush request reference in
 * tags->rqs[] for avoiding potential UAF.
 */
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
		unsigned int queue_depth, struct request *flush_rq)
{
	int i;
	unsigned long flags;

	/* The hw queue may not be mapped yet */
	if (!tags)
		return;

	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);

	for (i = 0; i < queue_depth; i++)
		cmpxchg(&tags->rqs[i], flush_rq, NULL);

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&tags->lock, flags);
	spin_unlock_irqrestore(&tags->lock, flags);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct request *flush_rq = hctx->fq->flush_rq;

	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);

	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
			set->queue_depth, flush_rq);
	if (set->ops->exit_request)
		set->ops->exit_request(set, flush_rq, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_remove_cpuhp(hctx);

	xa_erase(&q->hctx_table, hctx_idx);

	spin_lock(&q->unused_hctx_lock);
	list_add(&hctx->hctx_list, &q->unused_hctx_list);
	spin_unlock(&q->unused_hctx_lock);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	hctx->queue_num = hctx_idx;

	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
				&hctx->cpuhp_online);
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto unregister_cpu_notifier;

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
				hctx->numa_node))
		goto exit_hctx;

	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
		goto exit_flush_rq;

	return 0;

 exit_flush_rq:
	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static struct blk_mq_hw_ctx *
blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
		int node)
{
	struct blk_mq_hw_ctx *hctx;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;

	hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
	if (!hctx)
		goto fail_alloc_hctx;

	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
		goto free_hctx;

	atomic_set(&hctx->nr_active, 0);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;
	hctx->numa_node = node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;

	INIT_LIST_HEAD(&hctx->hctx_list);

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
			gfp, node);
	if (!hctx->ctxs)
		goto free_cpumask;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
				gfp, node, false, false))
		goto free_ctxs;
	hctx->nr_ctx = 0;

	spin_lock_init(&hctx->dispatch_wait_lock);
	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
	if (!hctx->fq)
		goto free_bitmap;

	blk_mq_hctx_kobj_init(hctx);

	return hctx;

 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 free_cpumask:
	free_cpumask_var(hctx->cpumask);
 free_hctx:
	kfree(hctx);
 fail_alloc_hctx:
	return NULL;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	struct blk_mq_tag_set *set = q->tag_set;
	unsigned int i, j;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;
		int k;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
			INIT_LIST_HEAD(&__ctx->rq_lists[k]);

		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		for (j = 0; j < set->nr_maps; j++) {
			hctx = blk_mq_map_queue_type(q, j, i);
			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
				hctx->numa_node = cpu_to_node(i);
		}
	}
}
3612 3613 3614
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx,
					     unsigned int depth)
3615
{
3616 3617
	struct blk_mq_tags *tags;
	int ret;
3618

3619
	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3620 3621
	if (!tags)
		return NULL;
3622

3623 3624
	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
	if (ret) {
3625
		blk_mq_free_rq_map(tags);
3626 3627
		return NULL;
	}
3628

3629
	return tags;
3630 3631
}

3632 3633
static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				       int hctx_idx)
3634
{
3635 3636
	if (blk_mq_is_shared_tags(set->flags)) {
		set->tags[hctx_idx] = set->shared_tags;
3637

3638
		return true;
3639
	}
3640

3641 3642 3643 3644
	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
						       set->queue_depth);

	return set->tags[hctx_idx];
3645 3646
}

3647 3648 3649
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx)
3650
{
3651 3652
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
3653
		blk_mq_free_rq_map(tags);
3654
	}
3655 3656
}

3657 3658 3659
static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
				      unsigned int hctx_idx)
{
3660
	if (!blk_mq_is_shared_tags(set->flags))
3661 3662 3663
		blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);

	set->tags[hctx_idx] = NULL;
3664 3665
}

3666
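/*
 * (Re)build the mapping between software queues (one per CPU) and hardware
 * queues for every queue map type, allocating tags for hardware queues that
 * gain CPUs and releasing tags for those that end up with none.
 */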
static void blk_mq_map_swqueue(struct request_queue *q)
3667
{
3668 3669
	unsigned int j, hctx_idx;
	unsigned long i;
3670 3671
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
M
Ming Lei 已提交
3672
	struct blk_mq_tag_set *set = q->tag_set;
3673 3674

	queue_for_each_hw_ctx(q, hctx, i) {
3675
		cpumask_clear(hctx->cpumask);
3676
		hctx->nr_ctx = 0;
3677
		hctx->dispatch_from = NULL;
3678 3679 3680
	}

	/*
3681
	 * Map software to hardware queues.
3682 3683
	 *
	 * If the cpu isn't present, the cpu is mapped to first hctx.
	 */
	for_each_possible_cpu(i) {

		ctx = per_cpu_ptr(q->queue_ctx, i);
		for (j = 0; j < set->nr_maps; j++) {
			if (!set->map[j].nr_queues) {
				ctx->hctxs[j] = blk_mq_map_queue_type(q,
						HCTX_TYPE_DEFAULT, i);
				continue;
			}
			hctx_idx = set->map[j].mq_map[i];
			/* unmapped hw queue can be remapped after CPU topo changed */
			if (!set->tags[hctx_idx] &&
			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
				/*
				 * If tags initialization fails for some hctx,
				 * that hctx won't be brought online.  In this
				 * case, remap the current ctx to hctx[0], which
				 * is guaranteed to always have tags allocated.
				 */
				set->map[j].mq_map[i] = 0;
			}

			hctx = blk_mq_map_queue_type(q, j, i);
			ctx->hctxs[j] = hctx;
			/*
			 * If the CPU is already set in the mask, then we've
			 * mapped this one already. This can happen if
			 * devices share queues across queue maps.
			 */
			if (cpumask_test_cpu(i, hctx->cpumask))
				continue;

			cpumask_set_cpu(i, hctx->cpumask);
			hctx->type = j;
			ctx->index_hw[hctx->type] = hctx->nr_ctx;
			hctx->ctxs[hctx->nr_ctx++] = ctx;

			/*
			 * If the nr_ctx type overflows, we have exceeded the
			 * amount of sw queues we can support.
			 */
			BUG_ON(!hctx->nr_ctx);
		}

		for (; j < HCTX_MAX_TYPES; j++)
			ctx->hctxs[j] = blk_mq_map_queue_type(q,
					HCTX_TYPE_DEFAULT, i);
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/* Never unmap queue 0.  We need it as a
			 * fallback in case allocation fails during a
			 * new remap.
			 */
			if (i)
				__blk_mq_free_map_and_rqs(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}
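
/*
 * Illustrative note (not part of the upstream file): on a hypothetical
 * 4-CPU system whose tag set advertises two hardware queues in the
 * default map, blk_mq_map_swqueue() typically ends up with a layout
 * along the lines of:
 *
 *	ctx of CPU0, CPU1 -> hctx0	(hctx0->nr_ctx == 2)
 *	ctx of CPU2, CPU3 -> hctx1	(hctx1->nr_ctx == 2)
 *
 * The exact grouping depends on the queue map built earlier (e.g. by
 * blk_mq_map_queues() or a driver's .map_queues callback), so the split
 * shown above is only an assumption for illustration.
 */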

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
		} else {
			blk_mq_tag_idle(hctx);
			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
		}
	}
}

static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
					 bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_shared(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
	INIT_LIST_HEAD(&q->tag_set_list);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	mutex_lock(&set->tag_list_lock);

	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
	if (!list_empty(&set->tag_list) &&
	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_shared(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/* All allocations will be freed in release handler of q->mq_kobj */
static int blk_mq_alloc_ctxs(struct request_queue *q)
{
	struct blk_mq_ctxs *ctxs;
	int cpu;

	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
	if (!ctxs)
		return -ENOMEM;

	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!ctxs->queue_ctx)
		goto fail;

	for_each_possible_cpu(cpu) {
		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
		ctx->ctxs = ctxs;
	}

	q->mq_kobj = &ctxs->kobj;
	q->queue_ctx = ctxs->queue_ctx;

	return 0;
 fail:
	kfree(ctxs);
	return -ENOMEM;
}

/*
 * This is the actual release handler for blk-mq, but we call it from
 * the request queue's release handler to avoid use-after-free issues.
 * Ideally q->mq_kobj would not have been introduced at all, but we
 * can't group the ctx/hctx kobjects without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx, *next;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));

	/* all hctx are in .unused_hctx_list now */
	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
		list_del_init(&hctx->hctx_list);
		kobject_put(&hctx->kobj);
	}

	xa_destroy(&q->hctx_table);

	/*
	 * release .mq_kobj and sw queue's kobject now because
	 * both share lifetime with request queue.
	 */
	blk_mq_sysfs_deinit(q);
}

static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
		void *queuedata)
{
	struct request_queue *q;
	int ret;

	q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->queuedata = queuedata;
	ret = blk_mq_init_allocated_queue(set, q);
	if (ret) {
		blk_cleanup_queue(q);
		return ERR_PTR(ret);
	}
	return q;
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	return blk_mq_init_queue_data(set, NULL);
}
EXPORT_SYMBOL(blk_mq_init_queue);

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass)
{
	struct request_queue *q;
	struct gendisk *disk;

	q = blk_mq_init_queue_data(set, queuedata);
	if (IS_ERR(q))
		return ERR_CAST(q);

	disk = __alloc_disk_node(q, set->numa_node, lkclass);
	if (!disk) {
		blk_cleanup_queue(q);
		return ERR_PTR(-ENOMEM);
	}
	return disk;
}
EXPORT_SYMBOL(__blk_mq_alloc_disk);
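
/*
 * Illustrative usage sketch (not part of the upstream file): drivers
 * normally reach the helper above through the blk_mq_alloc_disk()
 * wrapper macro, which supplies the lock class key.  The names
 * "my_dev" and "my_fops" below are placeholders.
 *
 *	struct gendisk *disk;
 *
 *	disk = blk_mq_alloc_disk(&my_dev->tag_set, my_dev);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	disk->fops = &my_fops;
 *	set_capacity(disk, my_dev->nr_sectors);
 *
 * followed by add_disk() once the rest of the device is ready.
 */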

static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
		struct blk_mq_tag_set *set, struct request_queue *q,
		int hctx_idx, int node)
{
	struct blk_mq_hw_ctx *hctx = NULL, *tmp;

	/* reuse dead hctx first */
	spin_lock(&q->unused_hctx_lock);
	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
		if (tmp->numa_node == node) {
			hctx = tmp;
			break;
		}
	}
	if (hctx)
		list_del_init(&hctx->hctx_list);
	spin_unlock(&q->unused_hctx_lock);

	if (!hctx)
		hctx = blk_mq_alloc_hctx(q, set, node);
	if (!hctx)
		goto fail;

	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
		goto free_hctx;

	return hctx;

 free_hctx:
	kobject_put(&hctx->kobj);
 fail:
	return NULL;
}

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
						struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;

	/* protect against switching io scheduler  */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int old_node;
		int node = blk_mq_get_hctx_node(set, i);
		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);

		if (old_hctx) {
			old_node = old_hctx->numa_node;
			blk_mq_exit_hctx(q, set, old_hctx, i);
		}

		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
			if (!old_hctx)
				break;
			pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
					node, old_node);
			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
			WARN_ON_ONCE(!hctx);
		}
	}
	/*
	 * Increasing nr_hw_queues fails. Free the newly allocated
	 * hctxs and keep the previous q->nr_hw_queues.
	 */
	if (i != set->nr_hw_queues) {
		j = q->nr_hw_queues;
	} else {
		j = i;
		q->nr_hw_queues = set->nr_hw_queues;
	}

	xa_for_each_start(&q->hctx_table, j, hctx, j)
		blk_mq_exit_hctx(q, set, hctx, j);
	mutex_unlock(&q->sysfs_lock);
}

static void blk_mq_update_poll_flag(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	if (set->nr_maps > HCTX_TYPE_POLL &&
	    set->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
}

int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q)
{
	WARN_ON_ONCE(blk_queue_has_srcu(q) !=
			!!(set->flags & BLK_MQ_F_BLOCKING));

	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	if (blk_mq_alloc_ctxs(q))
		goto err_poll;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	INIT_LIST_HEAD(&q->unused_hctx_list);
	spin_lock_init(&q->unused_hctx_lock);

	xa_init(&q->hctx_table);

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->tag_set = set;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
	blk_mq_update_poll_flag(q);

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = BLK_MQ_POLL_CLASSIC;

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);
	return 0;

err_hctxs:
	xa_destroy(&q->hctx_table);
	q->nr_hw_queues = 0;
	blk_mq_sysfs_deinit(q);
err_poll:
	blk_stat_free_callback(q->poll_cb);
	q->poll_cb = NULL;
err_exit:
	q->mq_ops = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
	blk_mq_del_queue_tag_set(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	if (blk_mq_is_shared_tags(set->flags)) {
		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						set->queue_depth);
		if (!set->shared_tags)
			return -ENOMEM;
	}

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (!__blk_mq_alloc_map_and_rqs(set, i))
			goto out_unwind;
		cond_resched();
	}

	return 0;

out_unwind:
	while (--i >= 0)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	/*
	 * blk_mq_map_queues() and multiple .map_queues() implementations
	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
	 * number of hardware queues.
	 */
	if (set->nr_maps == 1)
		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

	if (set->ops->map_queues && !is_kdump_kernel()) {
		int i;

		/*
		 * transport .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 * 	mask = get_cpu_mask(queue)
		 * 	for_each_cpu(cpu, mask)
		 * 		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared for
		 * killing stale mapping since one CPU may not be mapped
		 * to any hw queue.
		 */
		for (i = 0; i < set->nr_maps; i++)
			blk_mq_clear_mq_map(&set->map[i]);

		return set->ops->map_queues(set);
	} else {
		BUG_ON(set->nr_maps > 1);
		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	}
}
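
/*
 * Illustrative sketch (not part of the upstream file) of the .map_queues
 * shape described in the comment above: a driver without special
 * topology requirements can simply fall back to the generic helper for
 * its default map.  "my_map_queues" is a placeholder name.
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 *	}
 *
 * PCI drivers frequently use blk_mq_pci_map_queues() instead so that
 * the mapping follows the device's IRQ affinity.
 */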

static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
				  int cur_nr_hw_queues, int new_nr_hw_queues)
{
	struct blk_mq_tags **new_tags;

	if (cur_nr_hw_queues >= new_nr_hw_queues)
		return 0;

	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
				GFP_KERNEL, set->numa_node);
	if (!new_tags)
		return -ENOMEM;

	if (set->tags)
		memcpy(new_tags, set->tags, cur_nr_hw_queues *
		       sizeof(*set->tags));
	kfree(set->tags);
	set->tags = new_tags;
	set->nr_hw_queues = new_nr_hw_queues;

	return 0;
}

static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
				int new_nr_hw_queues)
{
	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int i, ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	if (!set->nr_maps)
		set->nr_maps = 1;
	else if (set->nr_maps > HCTX_MAX_TYPES)
		return -EINVAL;

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->nr_maps = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map
	 */
	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
		return -ENOMEM;

	ret = -ENOMEM;
	for (i = 0; i < set->nr_maps; i++) {
		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
						  sizeof(set->map[i].mq_map[0]),
						  GFP_KERNEL, set->numa_node);
		if (!set->map[i].mq_map)
			goto out_free_mq_map;
		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
	}

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_set_map_and_rqs(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	for (i = 0; i < set->nr_maps; i++) {
		kfree(set->map[i].mq_map);
		set->map[i].mq_map = NULL;
	}
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
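
/*
 * Illustrative usage sketch (not part of the upstream file): a typical
 * driver zeroes its tag set, fills in the mandatory fields (an ops table
 * providing at least .queue_rq) and then pairs the set with a disk.
 * "my_dev", "my_mq_ops" and "struct my_cmd" are placeholder names.
 *
 *	memset(&my_dev->tag_set, 0, sizeof(my_dev->tag_set));
 *	my_dev->tag_set.ops = &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues = 1;
 *	my_dev->tag_set.queue_depth = 128;
 *	my_dev->tag_set.numa_node = NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	my_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (err)
 *		return err;
 *
 *	disk = blk_mq_alloc_disk(&my_dev->tag_set, my_dev);
 *	if (IS_ERR(disk)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(disk);
 *	}
 */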

/* allocate and initialize a tagset for a simple single-queue device */
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->nr_maps = 1;
	set->queue_depth = queue_depth;
	set->numa_node = NUMA_NO_NODE;
	set->flags = set_flags;
	return blk_mq_alloc_tag_set(set);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
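
/*
 * Illustrative usage sketch (not part of the upstream file): for a
 * simple device the helper above collapses the setup shown after
 * blk_mq_alloc_tag_set() into a single call.  "my_dev" and "my_mq_ops"
 * are placeholders.
 *
 *	err = blk_mq_alloc_sq_tag_set(&my_dev->tag_set, &my_mq_ops, 64,
 *				      BLK_MQ_F_SHOULD_MERGE);
 */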

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i, j;

	for (i = 0; i < set->nr_hw_queues; i++)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	for (j = 0; j < set->nr_maps; j++) {
		kfree(set->map[j].mq_map);
		set->map[j].mq_map = NULL;
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
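
/*
 * Illustrative teardown order (not part of the upstream file), assuming
 * the allocation sketch shown after blk_mq_alloc_tag_set(): the disk and
 * its queue must be torn down before the tag set backing them.
 *
 *	del_gendisk(disk);
 *	blk_cleanup_disk(disk);
 *	blk_mq_free_tag_set(&my_dev->tag_set);
 */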

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int ret;
	unsigned long i;

	if (!set)
		return -EINVAL;

	if (q->nr_requests == nr)
		return 0;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		}
		if (ret)
			break;
		if (q->elevator && q->elevator->type->ops.depth_updated)
			q->elevator->type->ops.depth_updated(hctx);
	}
	if (!ret) {
		q->nr_requests = nr;
		if (blk_mq_is_shared_tags(set->flags)) {
			if (q->elevator)
				blk_mq_tag_update_sched_shared_tags(q);
			else
				blk_mq_tag_resize_shared_tags(set, nr);
		}
	}

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}
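
/*
 * Illustrative note (not part of the upstream file): this path is
 * normally reached through the queue's sysfs attribute, e.g.
 *
 *	echo 64 > /sys/block/<dev>/queue/nr_requests
 *
 * which ends up calling blk_mq_update_nr_requests() with nr == 64; the
 * function itself freezes and quiesces the queue as shown above.
 */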

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in qe pair list and switch the
 * io scheduler to 'none'
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release.  The module reference taken by
	 * elevator_get will also be dropped.  So take an extra reference
	 * on the io scheduler module here to prevent it from being removed
	 * while we still need it.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
						struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	list_for_each_entry(qe, head, node)
		if (qe->q == q)
			return qe;

	return NULL;
}

static void blk_mq_elv_switch_back(struct list_head *head,
				  struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t;

	qe = blk_lookup_qe_pair(head, q);
	if (!qe)
		return;
	t = qe->type;
	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_update_poll_flag(q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			int i = prev_nr_hw_queues;

			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				__blk_mq_free_map_and_rqs(set, i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
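
/*
 * Illustrative usage sketch (not part of the upstream file): drivers
 * whose queue count can change at runtime (e.g. after a controller
 * reset renegotiates the number of I/O queues) call this on their
 * existing tag set.  "my_dev" and "nr_io_queues" are placeholders.
 *
 *	blk_mq_update_nr_hw_queues(&my_dev->tag_set, nr_io_queues);
 *
 * Every queue sharing the tag set is frozen while the remap happens,
 * as done in __blk_mq_update_nr_hw_queues() above.
 */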

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (q->poll_stat)
		return true;

	return blk_stats_alloc_enable(q);
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}
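
/*
 * Illustrative worked example (not part of the upstream file): if the
 * stats bucket matching this request reports a mean completion time of
 * 8000ns, the function above returns (8000 + 1) / 2 = 4000ns, so hybrid
 * polling sleeps for roughly half of the expected completion time before
 * it starts busy polling.  The 8000ns figure is only an assumption for
 * illustration.
 */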

static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
	struct request *rq = blk_qc_to_rq(hctx, qc);
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	/*
	 * If a request has completed on a queue that uses an I/O scheduler, we
	 * won't get back a request from blk_qc_to_rq.
	 */
	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);

	/*
	 * If we sleep, have the caller restart the poll loop to reset the
	 * state.  Like for the other success return cases, the caller is
	 * responsible for checking if the IO completed.  If the IO isn't
	 * complete, we'll get called again and will go straight to the busy
	 * poll loop.
	 */
	return true;
}

static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (task_is_running(current))
			return 1;

		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}

static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);