// SPDX-License-Identifier: GPL-2.0
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/llist.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);

static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

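/*
 * Map a request to its poll statistics bucket: buckets are indexed by data
 * direction plus twice the log2 of the request size in sectors, and
 * out-of-range sizes fall into the last bucket for that direction.
 */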
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, sectors, bucket;

	ddir = rq_data_dir(rq);
	sectors = blk_rq_stats_sectors(rq);

	bucket = ddir + 2 * ilog2(sectors);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

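/*
 * Poll cookie layout: the hardware queue index lives above BLK_QC_T_SHIFT,
 * the tag sits in the low bits, and BLK_QC_T_INTERNAL marks a scheduler
 * (internal) tag rather than a driver tag.
 */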
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
	return xa_load(&q->hctx_table,
			(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
}

static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
		blk_qc_t qc)
{
	unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);

	if (qc & BLK_QC_T_INTERNAL)
		return blk_mq_tag_to_rq(hctx->sched_tags, tag);
	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static inline blk_qc_t blk_rq_to_qc(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
		(rq->tag != -1 ?
		 rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
}

/*
 * Check if any of the ctx, dispatch list or elevator
 * have pending work in this hardware queue.
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	if (!sbitmap_test_bit(&hctx->ctx_map, bit))
		sbitmap_set_bit(&hctx->ctx_map, bit);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	const int bit = ctx->index_hw[hctx->type];

	sbitmap_clear_bit(&hctx->ctx_map, bit);
}

struct mq_inflight {
	struct block_device *part;
	unsigned int inflight[2];
};

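/*
 * Tag iterator callback: count requests that are in flight for the given
 * partition (or the whole device), split by data direction.
 */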
static bool blk_mq_check_inflight(struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if ((!mi->part->bd_partno || rq->part == mi->part) &&
	    blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
		mi->inflight[rq_data_dir(rq)]++;

	return true;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part)
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
}

void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part };

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
}

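/*
 * Start freezing the queue: the first caller to bump mq_freeze_depth kills
 * q_usage_counter and runs the hardware queues so outstanding requests can
 * drain; nested callers only increment the depth.
 */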
void blk_freeze_queue_start(struct request_queue *q)
{
	mutex_lock(&q->mq_freeze_lock);
	if (++q->mq_freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		mutex_unlock(&q->mq_freeze_lock);
		if (queue_is_mq(q))
			blk_mq_run_hw_queues(q, false);
	} else {
		mutex_unlock(&q->mq_freeze_lock);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{
	mutex_lock(&q->mq_freeze_lock);
	if (force_atomic)
		q->q_usage_counter.data->force_atomic = true;
	q->mq_freeze_depth--;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	if (!q->mq_freeze_depth) {
		percpu_ref_resurrect(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
	mutex_unlock(&q->mq_freeze_lock);
}

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (!q->quiesce_depth++)
		blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
 * @q: request queue.
 *
 * Note: it is driver's responsibility for making sure that quiesce has
 * been started.
 */
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
	if (blk_queue_has_srcu(q))
		synchronize_srcu(q->srcu);
	else
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent that the struct request end_io()
 * callback function is invoked. Once this function is returned, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue_nowait(q);
	blk_mq_wait_quiesce_done(q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers queue into the state before quiescing
 * which is done by blk_mq_quiesce_queue.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;
	bool run_queue = false;

	spin_lock_irqsave(&q->queue_lock, flags);
	if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
		;
	} else if (!--q->quiesce_depth) {
		blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
		run_queue = true;
	}
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	if (run_queue)
		blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

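/*
 * Set up a request freshly taken from the static tag map: bind it to the
 * software and hardware context, record the tag, flags and timestamps, and
 * give the I/O scheduler a chance to attach its private data.
 */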
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
	struct blk_mq_ctx *ctx = data->ctx;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	struct request_queue *q = data->q;
	struct request *rq = tags->static_rqs[tag];

	rq->q = q;
	rq->mq_ctx = ctx;
	rq->mq_hctx = hctx;
	rq->cmd_flags = data->cmd_flags;

	if (data->flags & BLK_MQ_REQ_PM)
		data->rq_flags |= RQF_PM;
	if (blk_queue_io_stat(q))
		data->rq_flags |= RQF_IO_STAT;
	rq->rq_flags = data->rq_flags;

	if (!(data->rq_flags & RQF_ELV)) {
		rq->tag = tag;
		rq->internal_tag = BLK_MQ_NO_TAG;
	} else {
		rq->tag = BLK_MQ_NO_TAG;
		rq->internal_tag = tag;
	}
	rq->timeout = 0;

	if (blk_mq_need_time_stamp(rq))
		rq->start_time_ns = ktime_get_ns();
	else
		rq->start_time_ns = 0;
	rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	rq->alloc_time_ns = alloc_time_ns;
#endif
	rq->io_start_time_ns = 0;
	rq->stats_sectors = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->end_io = NULL;
	rq->end_io_data = NULL;

	blk_crypto_rq_set_defaults(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
	req_ref_set(rq, 1);

	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;

		INIT_HLIST_NODE(&rq->hash);
		RB_CLEAR_NODE(&rq->rb_node);

		if (!op_is_flush(data->cmd_flags) &&
		    e->type->ops.prepare_request) {
			e->type->ops.prepare_request(rq);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}

	return rq;
}

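/*
 * Batched allocation: grab several tags in one sbitmap operation,
 * initialize a request for each and stash them on the caller's cached_rq
 * list.  Returns the first request, or NULL if no tags were available.
 */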
static inline struct request *
__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
		u64 alloc_time_ns)
{
	unsigned int tag, tag_offset;
	struct blk_mq_tags *tags;
	struct request *rq;
	unsigned long tag_mask;
	int i, nr = 0;

	tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
	if (unlikely(!tag_mask))
		return NULL;

	tags = blk_mq_tags_from_data(data);
	for (i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;
		tag = tag_offset + i;
		prefetch(tags->static_rqs[tag]);
		tag_mask &= ~(1UL << i);
		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
		rq_list_add(data->cached_rq, rq);
		nr++;
	}
	/* caller already holds a reference, add for remainder */
	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
	data->nr_tags -= nr;

	return rq_list_pop(data->cached_rq);
}

static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int tag;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	if (data->cmd_flags & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (q->elevator) {
		struct elevator_queue *e = q->elevator;

		data->rq_flags |= RQF_ELV;

		/*
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		    !blk_op_is_passthrough(data->cmd_flags) &&
		    e->type->ops.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.limit_depth(data->cmd_flags, data);
	}

retry:
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	if (!(data->rq_flags & RQF_ELV))
		blk_mq_tag_busy(data->hctx);

	/*
	 * Try batched alloc if we want more than 1 tag.
	 */
	if (data->nr_tags > 1) {
		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
		if (rq)
			return rq;
		data->nr_tags = 1;
	}

	/*
	 * Waiting allocations only fail because of an inactive hctx.  In that
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
			return NULL;
		/*
		 * Give up the CPU and sleep for a random short time to
		 * ensure that thread using a realtime scheduling class
		 * are migrated off the CPU, and thus off the hctx that
		 * is going away.
		 */
		msleep(3);
		goto retry;
	}

	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
					alloc_time_ns);
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = __blk_mq_alloc_requests(&data);
	if (!rq)
		goto out_queue_exit;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(-EWOULDBLOCK);
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= op,
		.nr_tags	= 1,
	};
	u64 alloc_time_ns = 0;
	unsigned int cpu;
	unsigned int tag;
	int ret;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
		alloc_time_ns = ktime_get_ns();

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	ret = -EXDEV;
	data.hctx = xa_load(&q->hctx_table, hctx_idx);
	if (!blk_mq_hw_queue_mapped(data.hctx))
		goto out_queue_exit;
	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
	data.ctx = __blk_mq_get_ctx(q, cpu);

	if (!q->elevator)
		blk_mq_tag_busy(data.hctx);
	else
		data.rq_flags |= RQF_ELV;

	ret = -EWOULDBLOCK;
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

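/*
 * Final teardown of a request: return the driver and scheduler tags to the
 * hardware queue, restart the scheduler if it was waiting for tags, and
 * drop the queue usage reference taken at allocation time.
 */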
static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;
	if (rq->tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if ((rq->rq_flags & RQF_ELVPRIV) &&
	    q->elevator->type->ops.finish_request)
		q->elevator->type->ops.finish_request(rq);

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		__blk_mq_dec_active_requests(hctx);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->disk->bdi);

	rq_qos_done(q, rq);

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

void blk_mq_free_plug_rqs(struct blk_plug *plug)
{
	struct request *rq;

	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
		blk_mq_free_request(rq);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->q->disk ? rq->q->disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

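/*
 * Complete @nbytes of @bio on behalf of @rq.  Zone append bios cannot be
 * completed partially; on success their sector is updated to the location
 * the data was actually written to.
 */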
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (unlikely(error)) {
		bio->bi_status = error;
	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size != nbytes)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	bio_advance(bio, nbytes);

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);
	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_print_req_error(struct request *req, blk_status_t status)
{
	printk_ratelimited(KERN_ERR
		"%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		blk_status_to_str(status),
		req->q->disk ? req->q->disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

/*
 * Fully end IO on a request. Does not support partial completions, or
 * errors.
 */
static void blk_complete_request(struct request *req)
{
	const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
	int total_bytes = blk_rq_bytes(req);
	struct bio *bio = req->bio;

	trace_block_rq_complete(req, BLK_STS_OK, total_bytes);

	if (!bio)
		return;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
		req->q->integrity.profile->complete_fn(req, total_bytes);
#endif

	blk_account_io_completion(req, total_bytes);

	do {
		struct bio *next = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);

		if (req_op(req) == REQ_OP_ZONE_APPEND)
			bio->bi_iter.bi_sector = req->__sector;

		if (!is_flush)
			bio_endio(bio);
		bio = next;
	} while (bio);

	/*
	 * Reset counters so that the request stacking driver
	 * can find how many bytes remain in the request
	 * later.
	 */
	req->bio = NULL;
	req->__data_len = 0;
}

/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Note:
 *	The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
 *      except in the consistency check at the end of this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 **/
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)) &&
		     !test_bit(GD_DEAD, &req->q->disk->state)) {
		blk_print_req_error(req, error);
		trace_block_rq_error(req, error, nr_bytes);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

static void __blk_account_io_done(struct request *req, u64 now)
{
	const int sgrp = op_stat_group(req_op(req));

	part_stat_lock();
	update_io_ticks(req->part, jiffies, true);
	part_stat_inc(req->part, ios[sgrp]);
	part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
	part_stat_unlock();
}

static inline void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (blk_do_io_stat(req) && req->part &&
	    !(req->rq_flags & RQF_FLUSH_SEQ))
		__blk_account_io_done(req, now);
}

static void __blk_account_io_start(struct request *rq)
{
	/*
	 * All non-passthrough requests are created from a bio with one
	 * exception: when a flush command that is part of a flush sequence
	 * generated by the state machine in blk-flush.c is cloned onto the
	 * lower device by dm-multipath we can get here without a bio.
	 */
	if (rq->bio)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->q->disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static inline void blk_account_io_start(struct request *req)
{
	if (blk_do_io_stat(req))
		__blk_account_io_start(req);
}

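/*
 * Completion-time accounting: feed the poll statistics, notify the I/O
 * scheduler and update per-partition I/O stats.
 */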
static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_mq_sched_completed_request(rq, now);
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

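/*
 * Batched completion returns tags in groups of up to TAG_COMP_BATCH per
 * hardware queue, so the tag map and q_usage_counter are only touched a
 * handful of times per batch.
 */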
#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	/*
	 * All requests should have been marked as RQF_MQ_INFLIGHT, so
	 * update hctx->nr_active in batch
	 */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, nr_tags);

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *cur_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_complete_request(rq);
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		rq_qos_done(rq->q, rq);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!req_ref_put_and_test(rq))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);

		if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
			if (cur_hctx)
				blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
			nr_tags = 0;
			cur_hctx = rq->mq_hctx;
		}
		tags[nr_tags++] = rq->tag;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, entry, ipi_list)
		rq->q->mq_ops->complete(rq);
}

static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

static int blk_softirq_cpu_dead(unsigned int cpu)
{
	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
	return 0;
}

static void __blk_mq_complete_request_remote(void *data)
{
	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
}

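/*
 * Decide whether the completion should be bounced to the submitting CPU
 * (or its cache domain) with an IPI rather than handled where the
 * interrupt landed.
 */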
static inline bool blk_mq_complete_need_ipi(struct request *rq)
{
	int cpu = raw_smp_processor_id();

	if (!IS_ENABLED(CONFIG_SMP) ||
	    !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
		return false;
	/*
	 * With force threaded interrupts enabled, raising softirq from an SMP
	 * function call will always result in waking the ksoftirqd thread.
	 * This is probably worse than completing the request on a different
	 * cache domain.
	 */
	if (force_irqthreads())
		return false;

	/* same CPU or cache domain?  Complete locally */
	if (cpu == rq->mq_ctx->cpu ||
	    (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
	     cpus_share_cache(cpu, rq->mq_ctx->cpu)))
		return false;

	/* don't try to IPI to an offline CPU */
	return cpu_online(rq->mq_ctx->cpu);
}

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}

bool blk_mq_complete_request_remote(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);

	/*
	 * For a polled request, always complete locally, it's pointless
	 * to redirect the completion.
	 */
	if (rq->cmd_flags & REQ_POLLED)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Complete a request by scheduling the ->complete_rq operation.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

/**
 * blk_mq_start_request - Start processing a request
 * @rq: Pointer to request to be started
 *
 * Function used by device drivers to notify the block layer that a request
 * is going to be processed now, so blk layer can do proper initializations
 * such as starting the timeout timer.
 */
void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
		rq->stats_sectors = blk_rq_sectors(rq);
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
		q->integrity.profile->prepare_fn(rq);
#endif
	if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
	        WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
}
EXPORT_SYMBOL(blk_mq_start_request);

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/*
 * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
 * queues. This is important for md arrays to benefit from merging
 * requests.
 */
static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
{
	if (plug->multiple_queues)
		return BLK_MAX_REQUEST_COUNT * 2;
	return BLK_MAX_REQUEST_COUNT;
}

static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
{
	struct request *last = rq_list_peek(&plug->mq_list);

	if (!plug->rq_count) {
		trace_block_plug(rq->q);
	} else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
		   (!blk_queue_nomerges(rq->q) &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
		blk_mq_flush_plug_list(plug, false);
		trace_block_plug(rq->q);
	}

	if (!plug->multiple_queues && last && last->q != rq->q)
		plug->multiple_queues = true;
	if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
		plug->has_elevator = true;
	rq->rq_next = NULL;
	rq_list_add(&plug->mq_list, rq);
	plug->rq_count++;
}

/**
 * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io = done;

	blk_account_io_start(rq);
	if (current->plug)
		blk_add_rq_to_plug(current->plug, rq);
	else
		blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

static bool blk_rq_is_poll(struct request *rq)
{
	if (!rq->mq_hctx)
		return false;
	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
		return false;
	if (WARN_ON_ONCE(!rq->bio))
		return false;
	return true;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into queue for execution
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct request *rq, bool at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->end_io_data = &wait;
	rq->end_io = blk_end_sync_rq;

	blk_account_io_start(rq);
	blk_mq_sched_insert_request(rq, at_head, true, false);

	if (blk_rq_is_poll(rq)) {
		blk_rq_poll_completion(rq, &wait);
	} else {
		/*
		 * Prevent hang_check timer from firing at us during very long
		 * I/O
		 */
		unsigned long hang_check = sysctl_hung_task_timeout_secs;

		if (hang_check)
			while (!wait_for_completion_io_timeout(&wait,
					hang_check * (HZ/2)))
				;
		else
			wait_for_completion_io(&wait);
	}

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP, rq has contained some driver specific
		 * data, so insert it to hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_mq_rq_inflight(struct request *rq, void *priv,
			       bool reserved)
{
	/*
	 * If we find a request that isn't idle we know the queue is busy
	 * as it's checked in the iter.
	 * Return false to stop the iteration.
	 */
	if (blk_mq_request_started(rq)) {
		bool *busy = priv;

		*busy = true;
		return false;
	}

	return true;
}

bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}
EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

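/*
 * Check whether an in-flight request has passed its deadline; if not,
 * track the earliest pending deadline in *next so the timeout timer can
 * be re-armed for it.
 */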
static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = READ_ONCE(rq->deadline);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

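/*
 * Drop a reference on @rq.  The flush request is special cased: it is
 * completed through its end_io handler rather than the normal refcounted
 * free path.
 */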
void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}

static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
	 * be reallocated underneath the timeout handler's processing, then
	 * the expire check is reliable. If the request is not expired, then
	 * it was completed and reallocated as a new request after returning
	 * from blk_mq_check_expired().
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	return true;
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
	enum hctx_type type = hctx->type;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_lists[type]))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw[hctx->type] : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

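/*
 * Slow path for assigning a driver tag to a request that only holds a
 * scheduler tag: pick from the reserved or normal bitmap as appropriate
 * and honour the shared-tag queue depth limit.
 */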
static bool __blk_mq_alloc_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
			return false;
	}

	tag = __sbitmap_queue_get(bt);
	if (tag == BLK_MQ_NO_TAG)
		return false;

	rq->tag = tag + tag_offset;
	return true;
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
			!(rq->rq_flags & RQF_MQ_INFLIGHT)) {
		rq->rq_flags |= RQF_MQ_INFLIGHT;
		__blk_mq_inc_active_requests(hctx);
	}
	hctx->tags->rqs[rq->tag] = rq;
	return true;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		blk_mq_sched_mark_restart_hctx(hctx);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(sbq, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	atomic_inc(&sbq->ws_active);
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	atomic_dec(&sbq->ws_active);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average(EWMA):
 * - EWMA is one simple way to compute running average value
 * - weight(7/8 and 1/8) is applied so that it can decrease exponentially
 * - take 4 as factor for avoiding to get too small(0) result, and this
 *   factor doesn't matter because EWMA decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

static void blk_mq_handle_dev_resource(struct request *rq,
				       struct list_head *list)
{
	struct request *next =
		list_first_entry_or_null(list, struct request, queuelist);

	/*
	 * If an I/O scheduler has been configured and we got a driver tag for
	 * the next request already, free it.
	 */
	if (next)
		blk_mq_put_driver_tag(next);

	list_add(&rq->queuelist, list);
	__blk_mq_requeue_request(rq);
}

static void blk_mq_handle_zone_resource(struct request *rq,
					struct list_head *zone_list)
{
	/*
	 * If we end up here it is because we cannot dispatch a request to a
	 * specific zone due to LLD level zone-write locking or other zone
	 * related resource not being available. In this case, set the request
	 * aside in zone_list for retrying it later.
	 */
	list_add(&rq->queuelist, zone_list);
	__blk_mq_requeue_request(rq);
}

enum prep_dispatch {
	PREP_DISPATCH_OK,
	PREP_DISPATCH_NO_TAG,
	PREP_DISPATCH_NO_BUDGET,
};

static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
						  bool need_budget)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	int budget_token = -1;

	if (need_budget) {
		budget_token = blk_mq_get_dispatch_budget(rq->q);
		if (budget_token < 0) {
			blk_mq_put_driver_tag(rq);
			return PREP_DISPATCH_NO_BUDGET;
		}
		blk_mq_set_rq_budget_token(rq, budget_token);
	}

	if (!blk_mq_get_driver_tag(rq)) {
		/*
		 * The initial allocation attempt failed, so we need to
		 * rerun the hardware queue when a tag is freed. The
		 * waitqueue takes care of that. If the queue is run
		 * before we add this entry back on the dispatch list,
		 * we'll re-run it below.
		 */
		if (!blk_mq_mark_tag_wait(hctx, rq)) {
			/*
			 * All budgets not got from this function will be put
			 * together during handling partial dispatch
			 */
			if (need_budget)
				blk_mq_put_dispatch_budget(rq->q, budget_token);
			return PREP_DISPATCH_NO_TAG;
		}
	}

	return PREP_DISPATCH_OK;
}

/* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
static void blk_mq_release_budgets(struct request_queue *q,
		struct list_head *list)
{
	struct request *rq;

	list_for_each_entry(rq, list, queuelist) {
		int budget_token = blk_mq_get_rq_budget_token(rq);

		if (budget_token >= 0)
			blk_mq_put_dispatch_budget(q, budget_token);
	}
}

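/*
 * Note on budgets: when the caller passes nr_budgets > 0 it has already
 * obtained that many dispatch budgets, so blk_mq_prep_dispatch_rq() is told
 * not to take another one; whatever is left over after a partial dispatch is
 * handed back through blk_mq_release_budgets() below.
 */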
/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
			     unsigned int nr_budgets)
{
	enum prep_dispatch prep;
	struct request_queue *q = hctx->queue;
	struct request *rq, *nxt;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;
	LIST_HEAD(zone_list);
	bool needs_resource = false;

	if (list_empty(list))
		return false;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		WARN_ON_ONCE(hctx != rq->mq_hctx);
		prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
		if (prep != PREP_DISPATCH_OK)
			break;

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		/*
		 * once the request is queued to lld, no need to cover the
		 * budget any more
		 */
		if (nr_budgets)
			nr_budgets--;
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
			needs_resource = true;
			fallthrough;
		case BLK_STS_DEV_RESOURCE:
			blk_mq_handle_dev_resource(rq, list);
			goto out;
		case BLK_STS_ZONE_RESOURCE:
			/*
			 * Move the request to zone_list and keep going through
			 * the dispatch list to find more requests the drive can
			 * accept.
			 */
			blk_mq_handle_zone_resource(rq, &zone_list);
			needs_resource = true;
			break;
		default:
			errors++;
			blk_mq_end_request(rq, ret);
		}
	} while (!list_empty(list));
out:
	if (!list_empty(&zone_list))
		list_splice_tail_init(&zone_list, list);

	/* If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
		q->mq_ops->commit_rqs(hctx);
	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;
		/* For non-shared tags, the RESTART check will suffice */
		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);

		if (nr_budgets)
			blk_mq_release_budgets(q, list);

		spin_lock(&hctx->lock);
		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * Order adding requests to hctx->dispatch and checking
		 * SCHED_RESTART flag. The pair of this smp_mb() is the one
		 * in blk_mq_sched_restart(). Avoid restart code path to
		 * miss the new added requests to hctx->dispatch, meantime
		 * SCHED_RESTART is observed here.
		 */
		smp_mb();

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.  We'll do
		 * similar if we couldn't get budget or couldn't lock a zone
		 * and SCHED_RESTART is set.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (prep == PREP_DISPATCH_NO_BUDGET)
			needs_resource = true;
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && needs_resource)
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	return (queued + errors) != 0;
}

/**
 * __blk_mq_run_hw_queue - Run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 *
 * Send pending requests to the hardware.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	blk_mq_run_dispatch_ops(hctx->queue,
			blk_mq_sched_dispatch_requests(hctx));
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do unbound schedule if we can't find a online CPU for this hctx,
	 * and it should only happen in the path of handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

/**
 * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * If !@async, try to run the queue now. Else, run the queue asynchronously and
 * with a delay of @msecs.
 */
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

/**
 * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
 * @hctx: Pointer to the hardware queue to run.
 * @msecs: Milliseconds of delay to wait before running the queue.
 *
 * Run a hardware queue asynchronously with a delay of @msecs.
 */
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

/**
 * blk_mq_run_hw_queue - Start to run a hardware queue.
 * @hctx: Pointer to the hardware queue to run.
 * @async: If we want to run the queue asynchronously.
 *
 * Check if the request queue is not in a quiesced state and if there are
 * pending requests to be sent. If this is true, run the queue to send requests
 * to hardware.
 */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	bool need_run;

	/*
	 * When queue is quiesced, we may be switching io scheduler, or
	 * updating nr_hw_queues, or other things, and we can't run queue
	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	__blk_mq_run_dispatch_ops(hctx->queue, false,
		need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx));

	if (need_run)
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

/*
 * Is the request queue handled by an IO scheduler that does not respect
 * hardware queues when dispatching?
 */
static bool blk_mq_has_sqsched(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.dispatch_request &&
	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
		return true;
	return false;
}

/*
 * Return preferred queue to dispatch from (if any) for non-mq aware IO
 * scheduler.
 */
static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;

	/*
	 * If the IO scheduler does not respect hardware queues when
	 * dispatching, we just don't bother with multiple HW queues and
	 * dispatch from hctx for the current CPU since running multiple queues
	 * just causes lock contention inside the scheduler and pointless cache
	 * bouncing.
	 */
	hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT,
				     raw_smp_processor_id());
	if (!blk_mq_hctx_stopped(hctx))
		return hctx;
	return NULL;
}

/**
 * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
 * @q: Pointer to the request queue to run.
 * @async: If we want to run the queue asynchronously.
 */
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx, *sq_hctx;
	unsigned long i;

	sq_hctx = NULL;
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;
		/*
		 * Dispatch from this hctx either if there's no hctx preferred
		 * by IO scheduler or if it has requests that bypass the
		 * scheduler.
		 */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
 * @q: Pointer to the request queue to run.
 * @msecs: Milliseconds of delay to wait before running the queues.
 */
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
	struct blk_mq_hw_ctx *hctx, *sq_hctx;
	unsigned long i;

	sq_hctx = NULL;
	if (blk_mq_has_sqsched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;
		/*
		 * If there is already a run_work pending, leave the
		 * pending delay untouched. Otherwise, a hctx can stall
		 * if another hctx is re-delaying the other's work
		 * before the work executes.
		 */
		if (delayed_work_pending(&hctx->run_work))
			continue;
		/*
		 * Dispatch from this hctx either if there's no hctx preferred
		 * by IO scheduler or if it has requests that bypass the
		 * scheduler.
		 */
		if (!sq_hctx || sq_hctx == hctx ||
		    !list_empty_careful(&hctx->dispatch))
			blk_mq_delay_run_hw_queue(hctx, msecs);
	}
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used for pausing .queue_rq() by driver when
 * there isn't enough resource or some conditions aren't satisfied, and
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used for pausing .queue_rq() by driver when
 * there isn't enough resource or some conditions aren't satisfied, and
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue.
	 */
	if (blk_mq_hctx_stopped(hctx))
		return;

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_lists[type]);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/**
 * blk_mq_request_bypass_insert - Insert a request at dispatch list.
 * @rq: Pointer to request to be inserted.
 * @at_head: true if the request should be inserted at the head of the list.
 * @run_queue: If we should run the hardware queue after inserting the request.
 *
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	spin_lock(&hctx->lock);
	if (at_head)
		list_add(&rq->queuelist, &hctx->dispatch);
	else
		list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)

{
	struct request *rq;
	enum hctx_type type = hctx->type;

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	list_for_each_entry(rq, list, queuelist) {
		BUG_ON(rq->mq_ctx != ctx);
		trace_block_rq_insert(rq);
	}

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_lists[type]);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
			      bool from_schedule)
{
	if (hctx->queue->mq_ops->commit_rqs) {
		trace_block_unplug(hctx->queue, *queued, !from_schedule);
		hctx->queue->mq_ops->commit_rqs(hctx);
	}
	*queued = 0;
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	int err;

	if (bio->bi_opf & REQ_RAHEAD)
		rq->cmd_flags |= REQ_FAILFAST_MASK;

	rq->__sector = bio->bi_iter.bi_sector;
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
	WARN_ON_ONCE(err);

	blk_account_io_start(rq);
}

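/*
 * Hand a single request straight to ->queue_rq() and fold the outcome into
 * the hctx dispatch_busy EWMA; BLK_STS_RESOURCE/DEV_RESOURCE requeue the
 * request, any other error is returned to the caller to complete.
 */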
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq, bool last)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = last,
	};
	blk_status_t ret;

	/*
	 * For OK queue, we are done. For error, caller may kill it.
	 * Any other error (busy), just add it to our list as we
	 * previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		blk_mq_update_dispatch_busy(hctx, false);
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		blk_mq_update_dispatch_busy(hctx, true);
		__blk_mq_requeue_request(rq);
		break;
	default:
		blk_mq_update_dispatch_busy(hctx, false);
		break;
	}

	return ret;
}

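/*
 * Try to issue @rq directly to the driver: bail out to the scheduler insert
 * path if the queue is stopped/quiesced, an elevator is attached (unless
 * bypassing), or no budget/tag can be obtained.  With @bypass_insert set the
 * caller gets BLK_STS_RESOURCE back instead and handles the insert itself.
 */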
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						bool bypass_insert, bool last)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;
	int budget_token;

	/*
	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
	 * and avoid driver to try to dispatch again.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		bypass_insert = false;
		goto insert;
	}

	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
		goto insert;

	budget_token = blk_mq_get_dispatch_budget(q);
	if (budget_token < 0)
		goto insert;

	blk_mq_set_rq_budget_token(rq, budget_token);

	if (!blk_mq_get_driver_tag(rq)) {
		blk_mq_put_dispatch_budget(q, budget_token);
		goto insert;
	}

	return __blk_mq_issue_directly(hctx, rq, last);
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	blk_mq_sched_insert_request(rq, false, run_queue, false);

	return BLK_STS_OK;
}

/**
 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
 * @hctx: Pointer of the associated hardware queue.
 * @rq: Pointer to request to be sent.
 *
 * If the device has enough resources to accept a new request now, send the
 * request directly to device driver. Else, insert at hctx->dispatch queue, so
 * we can try send it another time in the future. Requests inserted at this
 * queue have higher priority.
 */
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	blk_status_t ret =
		__blk_mq_try_issue_directly(hctx, rq, false, true);

	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		blk_mq_request_bypass_insert(rq, false, true);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);
}

static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
}

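/*
 * Issue the requests cached in a plug list directly to their hardware
 * queues, committing queued requests whenever the target hctx changes and
 * falling back to the hctx dispatch list on the first resource failure.
 */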
static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx = NULL;
	struct request *rq;
	int queued = 0;
	int errors = 0;

	while ((rq = rq_list_pop(&plug->mq_list))) {
		bool last = rq_list_empty(plug->mq_list);
		blk_status_t ret;

		if (hctx != rq->mq_hctx) {
			if (hctx)
				blk_mq_commit_rqs(hctx, &queued, from_schedule);
			hctx = rq->mq_hctx;
		}

		ret = blk_mq_request_issue_directly(rq, last);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_mq_request_bypass_insert(rq, false, last);
			blk_mq_commit_rqs(hctx, &queued, from_schedule);
			return;
		default:
			blk_mq_end_request(rq, ret);
			errors++;
			break;
		}
	}

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if (errors)
		blk_mq_commit_rqs(hctx, &queued, from_schedule);
}

static void __blk_mq_flush_plug_list(struct request_queue *q,
				     struct blk_plug *plug)
{
	if (blk_queue_quiesced(q))
		return;
	q->mq_ops->queue_rqs(&plug->mq_list);
}

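/*
 * Peel requests that share the same hw queue and software ctx off the plug
 * list and push them to the I/O scheduler in one batch; requests for other
 * queues are kept on the plug list for the next iteration of the caller.
 */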
static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
{
	struct blk_mq_hw_ctx *this_hctx = NULL;
	struct blk_mq_ctx *this_ctx = NULL;
	struct request *requeue_list = NULL;
	unsigned int depth = 0;
	LIST_HEAD(list);

	do {
		struct request *rq = rq_list_pop(&plug->mq_list);

		if (!this_hctx) {
			this_hctx = rq->mq_hctx;
			this_ctx = rq->mq_ctx;
		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
			rq_list_add(&requeue_list, rq);
			continue;
		}
		list_add_tail(&rq->queuelist, &list);
		depth++;
	} while (!rq_list_empty(plug->mq_list));

	plug->mq_list = requeue_list;
	trace_block_unplug(this_hctx->queue, depth, !from_sched);
	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct request *rq;

	if (rq_list_empty(plug->mq_list))
		return;
	plug->rq_count = 0;

	if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
		struct request_queue *q;

		rq = rq_list_peek(&plug->mq_list);
		q = rq->q;

		/*
		 * Peek first request and see if we have a ->queue_rqs() hook.
		 * If we do, we can dispatch the whole plug list in one go. We
		 * already know at this point that all requests belong to the
		 * same queue, caller must ensure that's the case.
		 *
		 * Since we pass off the full list to the driver at this point,
		 * we do not increment the active request count for the queue.
		 * Bypass shared tags for now because of that.
		 */
		if (q->mq_ops->queue_rqs &&
		    !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
			blk_mq_run_dispatch_ops(q,
				__blk_mq_flush_plug_list(q, plug));
			if (rq_list_empty(plug->mq_list))
				return;
		}

		blk_mq_run_dispatch_ops(q,
				blk_mq_plug_issue_direct(plug, false));
		if (rq_list_empty(plug->mq_list))
			return;
	}

	do {
		blk_mq_dispatch_plug_list(plug, from_schedule);
	} while (!rq_list_empty(plug->mq_list));
}

void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list)
{
	int queued = 0;
	int errors = 0;

	while (!list_empty(list)) {
		blk_status_t ret;
		struct request *rq = list_first_entry(list, struct request,
				queuelist);

		list_del_init(&rq->queuelist);
		ret = blk_mq_request_issue_directly(rq, list_empty(list));
		if (ret != BLK_STS_OK) {
			if (ret == BLK_STS_RESOURCE ||
					ret == BLK_STS_DEV_RESOURCE) {
				blk_mq_request_bypass_insert(rq, false,
							list_empty(list));
				break;
			}
			blk_mq_end_request(rq, ret);
			errors++;
		} else
			queued++;
	}

	/*
	 * If we didn't flush the entire list, we could have told
	 * the driver there was more coming, but that turned out to
	 * be a lie.
	 */
	if ((!list_empty(list) || errors) &&
	     hctx->queue->mq_ops->commit_rqs && queued)
		hctx->queue->mq_ops->commit_rqs(hctx);
}

static bool blk_mq_attempt_bio_merge(struct request_queue *q,
				     struct bio *bio, unsigned int nr_segs)
{
	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
		if (blk_attempt_plug_merge(q, bio, nr_segs))
			return true;
		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
			return true;
	}
	return false;
}

static struct request *blk_mq_get_new_requests(struct request_queue *q,
					       struct blk_plug *plug,
					       struct bio *bio,
					       unsigned int nsegs)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.nr_tags	= 1,
		.cmd_flags	= bio->bi_opf,
	};
	struct request *rq;

	if (unlikely(bio_queue_enter(bio)))
		return NULL;

	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
		goto queue_exit;

	rq_qos_throttle(q, bio);

	if (plug) {
		data.nr_tags = plug->nr_ios;
		plug->nr_ios = 1;
		data.cached_rq = &plug->cached_rq;
	}

	rq = __blk_mq_alloc_requests(&data);
	if (rq)
		return rq;
	rq_qos_cleanup(q, bio);
	if (bio->bi_opf & REQ_NOWAIT)
		bio_wouldblock_error(bio);
queue_exit:
	blk_queue_exit(q);
	return NULL;
}

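/*
 * Fast path for plugged submission: reuse a request that was pre-allocated
 * into plug->cached_rq if it belongs to this queue and is compatible with
 * the bio (same hctx type and flush-ness); otherwise the caller falls back
 * to a fresh allocation in blk_mq_get_new_requests().
 */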
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
{
	struct request *rq;

	if (!plug)
		return NULL;
	rq = rq_list_peek(&plug->cached_rq);
	if (!rq || rq->q != q)
		return NULL;

	if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
		*bio = NULL;
		return NULL;
	}

	rq_qos_throttle(q, *bio);

	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
		return NULL;
	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
		return NULL;

	rq->cmd_flags = (*bio)->bi_opf;
	plug->cached_rq = rq_list_next(rq);
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

/**
 * blk_mq_submit_bio - Create and send a request to block device.
 * @bio: Bio pointer.
 *
 * Builds up a request structure from @q and @bio and send to the device. The
 * request may not be queued directly to hardware if:
 * * This request can be merged with another one
 * * We want to place request at plug queue for possible future merging
 * * There is an IO scheduler active at this queue
 *
 * It will not queue the request if there is an error with the bio, or at the
 * request creation.
 */
void blk_mq_submit_bio(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct blk_plug *plug = blk_mq_plug(q, bio);
	const int is_sync = op_is_sync(bio->bi_opf);
	struct request *rq;
	unsigned int nr_segs = 1;
	blk_status_t ret;

	blk_queue_bounce(q, &bio);
	if (blk_may_split(q, bio))
		__blk_queue_split(q, &bio, &nr_segs);

	if (!bio_integrity_prep(bio))
		return;

	rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
	if (!rq) {
		if (!bio)
			return;
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}

	trace_block_getrq(bio);

	rq_qos_track(q, rq, bio);

	blk_mq_bio_to_request(rq, bio, nr_segs);

	ret = blk_crypto_init_request(rq);
	if (ret != BLK_STS_OK) {
		bio->bi_status = ret;
		bio_endio(bio);
		blk_mq_free_request(rq);
		return;
	}

	if (op_is_flush(bio->bi_opf)) {
		blk_insert_flush(rq);
		return;
	}

	if (plug)
		blk_add_rq_to_plug(plug, rq);
	else if ((rq->rq_flags & RQF_ELV) ||
		 (rq->mq_hctx->dispatch_busy &&
		  (q->nr_hw_queues == 1 || !is_sync)))
		blk_mq_sched_insert_request(rq, false, true, true);
	else
		blk_mq_run_dispatch_ops(rq->q,
				blk_mq_try_issue_directly(rq->mq_hctx, rq));
}

#ifdef CONFIG_BLK_MQ_STACKING
/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @rq: the request being queued
 */
blk_status_t blk_insert_cloned_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	blk_status_t ret;

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same,etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	blk_mq_run_dispatch_ops(q,
			ret = blk_mq_request_issue_directly(rq, true));
	if (ret)
		blk_account_io_done(rq, ktime_get_ns());
	return ret;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
				      bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
#endif /* CONFIG_BLK_MQ_STACKING */

/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

/* called before freeing request pool in @tags */
static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
				    struct blk_mq_tags *tags)
{
	struct page *page;
	unsigned long flags;

	/* There is no need to clear a driver tags own mapping */
	if (drv_tags == tags)
		return;

	list_for_each_entry(page, &tags->page_list, lru) {
		unsigned long start = (unsigned long)page_address(page);
		unsigned long end = start + order_to_size(page->private);
		int i;

		for (i = 0; i < drv_tags->nr_tags; i++) {
			struct request *rq = drv_tags->rqs[i];
			unsigned long rq_addr = (unsigned long)rq;

			if (rq_addr >= start && rq_addr < end) {
				WARN_ON_ONCE(req_ref_read(rq) != 0);
				cmpxchg(&drv_tags->rqs[i], rq, NULL);
			}
		}
	}

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&drv_tags->lock, flags);
	spin_unlock_irqrestore(&drv_tags->lock, flags);
}

void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct blk_mq_tags *drv_tags;
	struct page *page;

	if (list_empty(&tags->page_list))
		return;

	if (blk_mq_is_shared_tags(set->flags))
		drv_tags = set->shared_tags;
	else
		drv_tags = set->tags[hctx_idx];

	if (tags->static_rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	blk_mq_clear_rq_mapping(drv_tags, tags);

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_alloc_rqs().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

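/*
 * Map a hardware queue index back to the queue map type that owns it so the
 * preferred NUMA node for its requests can be looked up below.
 */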
static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	int i;

	for (i = 0; i < set->nr_maps; i++) {
		unsigned int start = set->map[i].queue_offset;
		unsigned int end = start + set->map[i].nr_queues;

		if (hctx_idx >= start && hctx_idx < end)
			break;
	}

	if (i >= set->nr_maps)
		i = HCTX_TYPE_DEFAULT;

	return i;
}

static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	enum hctx_type type = hctx_idx_to_type(set, hctx_idx);

	return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
}

3159 3160 3161
static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					       unsigned int hctx_idx,
					       unsigned int nr_tags,
3162
					       unsigned int reserved_tags)
3163
{
3164
	int node = blk_mq_get_hctx_node(set, hctx_idx);
3165
	struct blk_mq_tags *tags;
3166

3167 3168 3169
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

3170 3171
	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3172 3173
	if (!tags)
		return NULL;
3174

3175
	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3176
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3177
				 node);
3178
	if (!tags->rqs) {
3179
		blk_mq_free_tags(tags);
3180 3181
		return NULL;
	}
3182

3183 3184 3185
	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
3188
		blk_mq_free_tags(tags);
		return NULL;
	}

3192 3193 3194
	return tags;
}

static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	return 0;
}

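/*
 * Back a tag set's static request array with pages: carve requests of
 * rq_size (struct request plus driver payload, cacheline aligned) out of the
 * highest page order we can get, falling back to smaller orders under
 * memory pressure, and run ->init_request() on each one.
 */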
static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
			    struct blk_mq_tags *tags,
			    unsigned int hctx_idx, unsigned int depth)
3213 3214
{
	unsigned int i, j, entries_per_page, max_order = 4;
3215
	int node = blk_mq_get_hctx_node(set, hctx_idx);
3216
	size_t rq_size, left;
3217 3218 3219

	if (node == NUMA_NO_NODE)
		node = set->numa_node;
3220 3221 3222

	INIT_LIST_HEAD(&tags->page_list);

3223 3224 3225 3226
	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
3227
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
3228
				cache_line_size());
3229
	left = rq_size * depth;
3230

3231
	for (i = 0; i < depth; ) {
3232 3233 3234 3235 3236
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

3237
		while (this_order && left < order_to_size(this_order - 1))
3238 3239 3240
			this_order--;

		do {
3241
			page = alloc_pages_node(node,
3242
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3243
				this_order);
3244 3245 3246 3247 3248 3249 3250 3251 3252
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
3253
			goto fail;
3254 3255

		page->private = this_order;
3256
		list_add_tail(&page->lru, &tags->page_list);
3257 3258

		p = page_address(page);
3259 3260 3261 3262
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
3263
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3264
		entries_per_page = order_to_size(this_order) / rq_size;
3265
		to_do = min(entries_per_page, depth - i);
3266 3267
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
3271 3272 3273
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
3274 3275
			}

3276 3277 3278 3279
			p += rq_size;
			i++;
		}
	}
3280
	return 0;
3281

3282
fail:
3283 3284
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
3285 3286
}

struct rq_iter_data {
	struct blk_mq_hw_ctx *hctx;
	bool has_rq;
};

static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
{
	struct rq_iter_data *iter_data = data;

	if (rq->mq_hctx != iter_data->hctx)
		return true;
	iter_data->has_rq = true;
	return false;
}

static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->sched_tags ?
			hctx->sched_tags : hctx->tags;
	struct rq_iter_data data = {
		.hctx	= hctx,
	};

	blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
	return data.has_rq;
}

static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
		struct blk_mq_hw_ctx *hctx)
{
	if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
		return false;
	if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
		return false;
	return true;
}

static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
		return 0;

	/*
	 * Prevent new request from being allocated on the current hctx.
	 *
	 * The smp_mb__after_atomic() Pairs with the implied barrier in
	 * test_and_set_bit_lock in sbitmap_get().  Ensures the inactive flag is
	 * seen once we return from the tag allocator.
	 */
	set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	smp_mb__after_atomic();

	/*
	 * Try to grab a reference to the queue and wait for any outstanding
	 * requests.  If we could not grab a reference the queue has been
	 * frozen and there are no requests.
	 */
	if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
		while (blk_mq_hctx_has_requests(hctx))
			msleep(5);
		percpu_ref_put(&hctx->queue->q_usage_counter);
	}

	return 0;
}

static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
			struct blk_mq_hw_ctx, cpuhp_online);

	if (cpumask_test_cpu(cpu, hctx->cpumask))
		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
	return 0;
}

/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
3372
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3373
{
3374
	struct blk_mq_hw_ctx *hctx;
3375 3376
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);
	enum hctx_type type;
3378

3379
	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3380 3381 3382
	if (!cpumask_test_cpu(cpu, hctx->cpumask))
		return 0;

	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
	type = hctx->type;
3385 3386

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_lists[type])) {
		list_splice_init(&ctx->rq_lists[type], &tmp);
3389 3390 3391 3392 3393
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
3394
		return 0;
3395

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);
3399 3400

	blk_mq_run_hw_queue(hctx, true);
3401
	return 0;
3402 3403
}

3404
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3405
{
3406 3407 3408
	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
						    &hctx->cpuhp_online);
3409 3410
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
3411 3412
}

/*
 * Before freeing hw queue, clearing the flush request reference in
 * tags->rqs[] for avoiding potential UAF.
 */
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
		unsigned int queue_depth, struct request *flush_rq)
{
	int i;
	unsigned long flags;

	/* The hw queue may not be mapped yet */
	if (!tags)
		return;

	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);

	for (i = 0; i < queue_depth; i++)
		cmpxchg(&tags->rqs[i], flush_rq, NULL);

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 */
	spin_lock_irqsave(&tags->lock, flags);
	spin_unlock_irqrestore(&tags->lock, flags);
}

/* hctx->ctxs will be freed in queue's release handler */
3443 3444 3445 3446
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
3447 3448
	struct request *flush_rq = hctx->fq->flush_rq;

3449 3450
	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);
3451

3452 3453
	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
			set->queue_depth, flush_rq);
3454
	if (set->ops->exit_request)
3455
		set->ops->exit_request(set, flush_rq, hctx_idx);
3456

3457 3458 3459
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

3460
	blk_mq_remove_cpuhp(hctx);
3461

	xa_erase(&q->hctx_table, hctx_idx);

3464 3465 3466
	spin_lock(&q->unused_hctx_lock);
	list_add(&hctx->hctx_list, &q->unused_hctx_list);
	spin_unlock(&q->unused_hctx_lock);
3467 3468
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

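/*
 * Finish bringing up one hardware context: register its CPU hotplug
 * notifiers, attach the driver tags, call the driver's ->init_hctx(), set up
 * the internal flush request and make the hctx reachable via the queue's
 * hctx_table.
 */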
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	hctx->queue_num = hctx_idx;

	if (!(hctx->flags & BLK_MQ_F_STACKING))
		cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
				&hctx->cpuhp_online);
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto unregister_cpu_notifier;

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
				hctx->numa_node))
		goto exit_hctx;

	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
		goto exit_flush_rq;

	return 0;

 exit_flush_rq:
	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static struct blk_mq_hw_ctx *
blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
		int node)
{
	struct blk_mq_hw_ctx *hctx;
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;

3526
	hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3527 3528 3529 3530 3531 3532 3533
	if (!hctx)
		goto fail_alloc_hctx;

	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
		goto free_hctx;

	atomic_set(&hctx->nr_active, 0);
3534
	if (node == NUMA_NO_NODE)
3535 3536
		node = set->numa_node;
	hctx->numa_node = node;
3537

3538
	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3539 3540 3541
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
3542
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3543

3544 3545
	INIT_LIST_HEAD(&hctx->hctx_list);

3546
	/*
3547 3548
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
3549
	 */
3550
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3551
			gfp, node);
3552
	if (!hctx->ctxs)
3553
		goto free_cpumask;
3554

3555
	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3556
				gfp, node, false, false))
3557 3558
		goto free_ctxs;
	hctx->nr_ctx = 0;
3559

3560
	spin_lock_init(&hctx->dispatch_wait_lock);
3561 3562 3563
	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

3564
	hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3565
	if (!hctx->fq)
3566
		goto free_bitmap;
3567

3568
	blk_mq_hctx_kobj_init(hctx);
3569

3570
	return hctx;
3571

3572
 free_bitmap:
3573
	sbitmap_free(&hctx->ctx_map);
3574 3575
 free_ctxs:
	kfree(hctx->ctxs);
3576 3577 3578 3579 3580 3581
 free_cpumask:
	free_cpumask_var(hctx->cpumask);
 free_hctx:
	kfree(hctx);
 fail_alloc_hctx:
	return NULL;
3582
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	struct blk_mq_tag_set *set = q->tag_set;
	unsigned int i, j;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;
		int k;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
			INIT_LIST_HEAD(&__ctx->rq_lists[k]);

		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		for (j = 0; j < set->nr_maps; j++) {
			hctx = blk_mq_map_queue_type(q, j, i);
			if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
				hctx->numa_node = cpu_to_node(i);
		}
	}
}

struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
					     unsigned int hctx_idx,
					     unsigned int depth)
{
	struct blk_mq_tags *tags;
	int ret;

	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
	if (!tags)
		return NULL;

	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
	if (ret) {
		blk_mq_free_rq_map(tags);
		return NULL;
	}

	return tags;
}

static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				       int hctx_idx)
{
	if (blk_mq_is_shared_tags(set->flags)) {
		set->tags[hctx_idx] = set->shared_tags;

		return true;
	}

	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
						       set->queue_depth);

	return set->tags[hctx_idx];
}

void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx)
{
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
		blk_mq_free_rq_map(tags);
	}
}

static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
				      unsigned int hctx_idx)
{
	if (!blk_mq_is_shared_tags(set->flags))
		blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);

	set->tags[hctx_idx] = NULL;
}

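/*
 * Rebuild the software ctx -> hardware ctx mapping for every possible CPU
 * after a (re)configuration: unmapped hw queues fall back to hctx 0, and hw
 * queues that end up with no ctx get their tags released.
 */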
static void blk_mq_map_swqueue(struct request_queue *q)
3669
{
3670 3671
	unsigned int j, hctx_idx;
	unsigned long i;
3672 3673
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;
3675 3676

	queue_for_each_hw_ctx(q, hctx, i) {
3677
		cpumask_clear(hctx->cpumask);
3678
		hctx->nr_ctx = 0;
3679
		hctx->dispatch_from = NULL;
3680 3681 3682
	}

	/*
3683
	 * Map software to hardware queues.
3684 3685
	 *
	 * If the cpu isn't present, the cpu is mapped to first hctx.
3686
	 */
3687
	for_each_possible_cpu(i) {
3688

3689
		ctx = per_cpu_ptr(q->queue_ctx, i);
J
Jens Axboe 已提交
3690
		for (j = 0; j < set->nr_maps; j++) {
3691 3692 3693
			if (!set->map[j].nr_queues) {
				ctx->hctxs[j] = blk_mq_map_queue_type(q,
						HCTX_TYPE_DEFAULT, i);
3694
				continue;
3695
			}
3696 3697 3698
			hctx_idx = set->map[j].mq_map[i];
			/* unmapped hw queue can be remapped after CPU topo changed */
			if (!set->tags[hctx_idx] &&
3699
			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3700 3701 3702 3703 3704 3705 3706 3707
				/*
				 * If tags initialization fail for some hctx,
				 * that hctx won't be brought online.  In this
				 * case, remap the current ctx to hctx[0] which
				 * is guaranteed to always have tags allocated
				 */
				set->map[j].mq_map[i] = 0;
			}
3708

J
Jens Axboe 已提交
3709
			hctx = blk_mq_map_queue_type(q, j, i);
3710
			ctx->hctxs[j] = hctx;
J
Jens Axboe 已提交
3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729
			/*
			 * If the CPU is already set in the mask, then we've
			 * mapped this one already. This can happen if
			 * devices share queues across queue maps.
			 */
			if (cpumask_test_cpu(i, hctx->cpumask))
				continue;

			cpumask_set_cpu(i, hctx->cpumask);
			hctx->type = j;
			ctx->index_hw[hctx->type] = hctx->nr_ctx;
			hctx->ctxs[hctx->nr_ctx++] = ctx;

			/*
			 * If the nr_ctx type overflows, we have exceeded the
			 * amount of sw queues we can support.
			 */
			BUG_ON(!hctx->nr_ctx);
		}
3730 3731 3732 3733

		for (; j < HCTX_MAX_TYPES; j++)
			ctx->hctxs[j] = blk_mq_map_queue_type(q,
					HCTX_TYPE_DEFAULT, i);
3734
	}
3735 3736

	queue_for_each_hw_ctx(q, hctx, i) {
3737 3738 3739 3740 3741 3742 3743 3744 3745
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/* Never unmap queue 0.  We need it as a
			 * fallback in case of a new remap fails
			 * allocation
			 */
3746 3747
			if (i)
				__blk_mq_free_map_and_rqs(set, i);
3748 3749 3750 3751

			hctx->tags = NULL;
			continue;
		}
3752

M
Ming Lei 已提交
3753 3754 3755
		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

3756 3757 3758 3759 3760
		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
3761
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3762

3763 3764 3765
		/*
		 * Initialize batch roundrobin counts
		 */
3766
		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3767 3768
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
3769 3770
}

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared) {
			hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
		} else {
			blk_mq_tag_idle(hctx);
			hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
		}
	}
}

static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
					 bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_shared(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
	INIT_LIST_HEAD(&q->tag_set_list);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	mutex_lock(&set->tag_list_lock);

	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
	if (!list_empty(&set->tag_list) &&
	    !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_shared(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/* All allocations will be freed in release handler of q->mq_kobj */
static int blk_mq_alloc_ctxs(struct request_queue *q)
{
	struct blk_mq_ctxs *ctxs;
	int cpu;

	ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
	if (!ctxs)
		return -ENOMEM;

	ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!ctxs->queue_ctx)
		goto fail;

	for_each_possible_cpu(cpu) {
		struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
		ctx->ctxs = ctxs;
	}

	q->mq_kobj = &ctxs->kobj;
	q->queue_ctx = ctxs->queue_ctx;

	return 0;
 fail:
	kfree(ctxs);
	return -ENOMEM;
}

/*
 * This is the actual release handler for mq, but we do it from the request
 * queue's release handler to avoid a use-after-free. It is a headache that
 * q->mq_kobj shouldn't have been introduced, but we can't group the ctx/hctx
 * kobjects without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx, *next;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));

	/* all hctx are in .unused_hctx_list now */
	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
		list_del_init(&hctx->hctx_list);
		kobject_put(&hctx->kobj);
	}

	xa_destroy(&q->hctx_table);

	/*
	 * release .mq_kobj and sw queue's kobject now because
	 * both share lifetime with request queue.
	 */
	blk_mq_sysfs_deinit(q);
}

static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
		void *queuedata)
{
	struct request_queue *q;
	int ret;

	q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->queuedata = queuedata;
	ret = blk_mq_init_allocated_queue(set, q);
	if (ret) {
		blk_cleanup_queue(q);
		return ERR_PTR(ret);
	}
	return q;
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	return blk_mq_init_queue_data(set, NULL);
}
EXPORT_SYMBOL(blk_mq_init_queue);

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass)
{
	struct request_queue *q;
	struct gendisk *disk;

	q = blk_mq_init_queue_data(set, queuedata);
	if (IS_ERR(q))
		return ERR_CAST(q);

	disk = __alloc_disk_node(q, set->numa_node, lkclass);
	if (!disk) {
		blk_cleanup_queue(q);
		return ERR_PTR(-ENOMEM);
	}
	return disk;
}
EXPORT_SYMBOL(__blk_mq_alloc_disk);
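
/*
 * Note: drivers normally reach __blk_mq_alloc_disk() through the
 * blk_mq_alloc_disk() wrapper macro in <linux/blk-mq.h>, which supplies a
 * static lock_class_key used for the new disk's lockdep annotation.
 */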

static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
		struct blk_mq_tag_set *set, struct request_queue *q,
		int hctx_idx, int node)
{
	struct blk_mq_hw_ctx *hctx = NULL, *tmp;

	/* reuse dead hctx first */
	spin_lock(&q->unused_hctx_lock);
	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
		if (tmp->numa_node == node) {
			hctx = tmp;
			break;
		}
	}
	if (hctx)
		list_del_init(&hctx->hctx_list);
	spin_unlock(&q->unused_hctx_lock);

	if (!hctx)
		hctx = blk_mq_alloc_hctx(q, set, node);
	if (!hctx)
		goto fail;

	if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
		goto free_hctx;

	return hctx;

 free_hctx:
	kobject_put(&hctx->kobj);
 fail:
	return NULL;
}

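/*
 * (Re)allocate the hardware contexts of @q to match set->nr_hw_queues:
 * existing hctxs are exited and re-created (preferring their previous NUMA
 * node on allocation failure), new ones are allocated, and any excess hctxs
 * beyond the final count are torn down.  If growing the queue count fails
 * midway, the previous q->nr_hw_queues is kept.
 */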
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
						struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;

	/* protect against switching io scheduler */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int old_node;
		int node = blk_mq_get_hctx_node(set, i);
		struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);

		if (old_hctx) {
			old_node = old_hctx->numa_node;
			blk_mq_exit_hctx(q, set, old_hctx, i);
		}

		if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
			if (!old_hctx)
				break;
			pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
					node, old_node);
			hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
			WARN_ON_ONCE(!hctx);
		}
	}
	/*
	 * If increasing nr_hw_queues failed, free the newly allocated
	 * hctxs and keep the previous q->nr_hw_queues.
	 */
	if (i != set->nr_hw_queues) {
		j = q->nr_hw_queues;
	} else {
		j = i;
		q->nr_hw_queues = set->nr_hw_queues;
	}

	xa_for_each_start(&q->hctx_table, j, hctx, j)
		blk_mq_exit_hctx(q, set, hctx, j);
	mutex_unlock(&q->sysfs_lock);
}

static void blk_mq_update_poll_flag(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	if (set->nr_maps > HCTX_TYPE_POLL &&
	    set->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
}

int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q)
{
	WARN_ON_ONCE(blk_queue_has_srcu(q) !=
			!!(set->flags & BLK_MQ_F_BLOCKING));

	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	if (blk_mq_alloc_ctxs(q))
		goto err_poll;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	INIT_LIST_HEAD(&q->unused_hctx_list);
	spin_lock_init(&q->unused_hctx_lock);

	xa_init(&q->hctx_table);

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->tag_set = set;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
	blk_mq_update_poll_flag(q);

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = BLK_MQ_POLL_CLASSIC;

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);
	return 0;

err_hctxs:
	xa_destroy(&q->hctx_table);
	q->nr_hw_queues = 0;
	blk_mq_sysfs_deinit(q);
err_poll:
	blk_stat_free_callback(q->poll_cb);
	q->poll_cb = NULL;
err_exit:
	q->mq_ops = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
	blk_mq_del_queue_tag_set(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	if (blk_mq_is_shared_tags(set->flags)) {
		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						set->queue_depth);
		if (!set->shared_tags)
			return -ENOMEM;
	}

	for (i = 0; i < set->nr_hw_queues; i++) {
		if (!__blk_mq_alloc_map_and_rqs(set, i))
			goto out_unwind;
		cond_resched();
	}

	return 0;

out_unwind:
	while (--i >= 0)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	/*
	 * blk_mq_map_queues() and multiple .map_queues() implementations
	 * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
	 * number of hardware queues.
	 */
	if (set->nr_maps == 1)
		set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;

	if (set->ops->map_queues && !is_kdump_kernel()) {
		int i;

		/*
		 * A transport's .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 * 	mask = get_cpu_mask(queue)
		 * 	for_each_cpu(cpu, mask)
		 * 		set->map[x].mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared for
		 * killing stale mappings, since one CPU may not be mapped
		 * to any hw queue.
		 *
		 * (A hedged driver-side sketch of this pattern follows this
		 * function.)
		 */
		for (i = 0; i < set->nr_maps; i++)
			blk_mq_clear_mq_map(&set->map[i]);

		return set->ops->map_queues(set);
	} else {
		BUG_ON(set->nr_maps > 1);
		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	}
}
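
/*
 * Example (hedged sketch, not part of blk-mq itself): a transport driver's
 * .map_queues callback typically spreads its hardware queues over the CPUs,
 * either by filling mq_map directly as below or by calling a generic helper.
 * "my_map_queues" and "my_get_queue_affinity" are hypothetical names used
 * only for illustration.
 *
 *	static int my_map_queues(struct blk_mq_tag_set *set)
 *	{
 *		struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];
 *		unsigned int queue, cpu;
 *
 *		for (queue = 0; queue < qmap->nr_queues; queue++) {
 *			const struct cpumask *mask =
 *					my_get_queue_affinity(queue);
 *
 *			for_each_cpu(cpu, mask)
 *				qmap->mq_map[cpu] = qmap->queue_offset + queue;
 *		}
 *		return 0;
 *	}
 *
 * Drivers without real per-queue affinity information can usually just call
 * blk_mq_map_queues(qmap) instead.
 */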

static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
				  int cur_nr_hw_queues, int new_nr_hw_queues)
{
	struct blk_mq_tags **new_tags;

	if (cur_nr_hw_queues >= new_nr_hw_queues)
		return 0;

	new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
				GFP_KERNEL, set->numa_node);
	if (!new_tags)
		return -ENOMEM;

	if (set->tags)
		memcpy(new_tags, set->tags, cur_nr_hw_queues *
		       sizeof(*set->tags));
	kfree(set->tags);
	set->tags = new_tags;
	set->nr_hw_queues = new_nr_hw_queues;

	return 0;
}

static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
				int new_nr_hw_queues)
{
	return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int i, ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	if (!set->nr_maps)
		set->nr_maps = 1;
	else if (set->nr_maps > HCTX_MAX_TYPES)
		return -EINVAL;

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->nr_maps = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus if we just have
	 * a single map
	 */
	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
		return -ENOMEM;

	ret = -ENOMEM;
	for (i = 0; i < set->nr_maps; i++) {
		set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
						  sizeof(set->map[i].mq_map[0]),
						  GFP_KERNEL, set->numa_node);
		if (!set->map[i].mq_map)
			goto out_free_mq_map;
		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
	}

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_set_map_and_rqs(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	for (i = 0; i < set->nr_maps; i++) {
		kfree(set->map[i].mq_map);
		set->map[i].mq_map = NULL;
	}
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

/* allocate and initialize a tagset for a simple single-queue device */
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->nr_maps = 1;
	set->queue_depth = queue_depth;
	set->numa_node = NUMA_NO_NODE;
	set->flags = set_flags;
	return blk_mq_alloc_tag_set(set);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
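
/*
 * Example (hedged sketch, not part of blk-mq itself): a minimal single-queue
 * driver typically pairs blk_mq_alloc_sq_tag_set() with blk_mq_alloc_disk(),
 * then fills in disk->fops and adds the disk. "my_probe", "my_dev",
 * "my_mq_ops" and the queue depth of 128 are hypothetical names and values
 * used only for illustration.
 *
 *	static int my_probe(struct my_dev *dev)
 *	{
 *		struct gendisk *disk;
 *		int ret;
 *
 *		ret = blk_mq_alloc_sq_tag_set(&dev->tag_set, &my_mq_ops, 128,
 *					      BLK_MQ_F_SHOULD_MERGE);
 *		if (ret)
 *			return ret;
 *
 *		disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *		if (IS_ERR(disk)) {
 *			blk_mq_free_tag_set(&dev->tag_set);
 *			return PTR_ERR(disk);
 *		}
 *		dev->disk = disk;
 *		return 0;
 *	}
 */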

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i, j;

	for (i = 0; i < set->nr_hw_queues; i++)
		__blk_mq_free_map_and_rqs(set, i);

	if (blk_mq_is_shared_tags(set->flags)) {
		blk_mq_free_map_and_rqs(set, set->shared_tags,
					BLK_MQ_NO_HCTX_IDX);
	}

	for (j = 0; j < set->nr_maps; j++) {
		kfree(set->map[j].mq_map);
		set->map[j].mq_map = NULL;
	}

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int ret;
	unsigned long i;

	if (!set)
		return -EINVAL;

	if (q->nr_requests == nr)
		return 0;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
						      nr, true);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
						      false);
		}
		if (ret)
			break;
		if (q->elevator && q->elevator->type->ops.depth_updated)
			q->elevator->type->ops.depth_updated(hctx);
	}
	if (!ret) {
		q->nr_requests = nr;
		if (blk_mq_is_shared_tags(set->flags)) {
			if (q->elevator)
				blk_mq_tag_update_sched_shared_tags(q);
			else
				blk_mq_tag_resize_shared_tags(set, nr);
		}
	}

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in qe pair list and switch the
 * io scheduler to 'none'
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release. The reference to the io scheduler
	 * module obtained by elevator_get will also be put. So we need to
	 * take a reference to the io scheduler module here to prevent it
	 * from being removed.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
						struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	list_for_each_entry(qe, head, node)
		if (qe->q == q)
			return qe;

	return NULL;
}

static void blk_mq_elv_switch_back(struct list_head *head,
				  struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t;

	qe = blk_lookup_qe_pair(head, q);
	if (!qe)
		return;
	t = qe->type;
	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

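/*
 * Core of blk_mq_update_nr_hw_queues(): with every queue in the tag set
 * frozen, switch their elevators to 'none', grow the tag set's tags array if
 * needed, rebuild the queue maps and hardware contexts for every queue
 * (falling back to the previous nr_hw_queues on failure), then re-register
 * sysfs/debugfs entries, restore the elevators and unfreeze.
 */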
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);
	int prev_nr_hw_queues;

	lockdep_assert_held(&set->tag_list_lock);

	if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1)
		return;
	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_debugfs_unregister_hctxs(q);
		blk_mq_sysfs_unregister(q);
	}

	prev_nr_hw_queues = set->nr_hw_queues;
	if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
	    0)
		goto reregister;

	set->nr_hw_queues = nr_hw_queues;
fallback:
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_update_poll_flag(q);
		if (q->nr_hw_queues != set->nr_hw_queues) {
			int i = prev_nr_hw_queues;

			pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				__blk_mq_free_map_and_rqs(set, i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
			goto fallback;
		}
		blk_mq_map_swqueue(q);
	}

reregister:
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_sysfs_register(q);
		blk_mq_debugfs_register_hctxs(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (q->poll_stat)
		return true;

	return blk_stats_alloc_enable(q);
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}

static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
	struct request *rq = blk_qc_to_rq(hctx, qc);
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	/*
	 * If a request has completed on a queue that uses an I/O scheduler,
	 * we won't get back a request from blk_qc_to_rq.
	 */
	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
		return false;

	/*
	 * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
	 *
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_sleeper_start_expires(&hs, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);

	/*
	 * If we sleep, have the caller restart the poll loop to reset the
	 * state.  Like for the other success return cases, the caller is
	 * responsible for checking if the IO completed.  If the IO isn't
	 * complete, we'll get called again and will go straight to the busy
	 * poll loop.
	 */
	return true;
}

static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
		}

		if (signal_pending_state(state, current))
			__set_current_state(TASK_RUNNING);
		if (task_is_running(current))
			return 1;

		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());

	__set_current_state(TASK_RUNNING);
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
{
	return rq->mq_ctx->cpu;
}
EXPORT_SYMBOL(blk_mq_rq_cpu);

void blk_mq_cancel_work_sync(struct request_queue *q)
{
	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}
}

static int __init blk_mq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(blk_cpu_done, i));
	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);

	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				blk_mq_hctx_notify_online,
				blk_mq_hctx_notify_offline);
	return 0;
}
subsys_initcall(blk_mq_init);