blk-mq.c 56.5 KB
Newer Older
1 2 3 4 5 6
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
7 8 9 10 11
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
12
#include <linux/kmemleak.h>
13 14 15 16 17 18 19 20 21 22 23
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
24
#include <linux/crash_dump.h>
25
#include <linux/prefetch.h>
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
42
	return sbitmap_any_bit_set(&hctx->ctx_map);
43 44
}

45 46 47 48 49 50
/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
51 52
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
53 54 55 56 57
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
58
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
59 60
}

61
void blk_mq_freeze_queue_start(struct request_queue *q)
62
{
63
	int freeze_depth;
64

65 66
	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
67
		percpu_ref_kill(&q->q_usage_counter);
68
		blk_mq_run_hw_queues(q, false);
69
	}
70
}
71
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
72 73 74

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
75
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
76 77
}

78 79 80 81
/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
82
void blk_freeze_queue(struct request_queue *q)
83
{
84 85 86 87 88 89 90
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
91 92 93
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}
94 95 96 97 98 99 100 101 102

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
103
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
104

105
void blk_mq_unfreeze_queue(struct request_queue *q)
106
{
107
	int freeze_depth;
108

109 110 111
	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
112
		percpu_ref_reinit(&q->q_usage_counter);
113
		wake_up_all(&q->mq_freeze_wq);
114
	}
115
}
116
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
117

118 119 120 121 122 123 124 125
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
126 127 128 129 130 131 132

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
133 134
}

135 136 137 138 139 140
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

141
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
142 143
			       struct request *rq, int op,
			       unsigned int op_flags)
144
{
145
	if (blk_queue_io_stat(q))
146
		op_flags |= REQ_IO_STAT;
147

148 149 150
	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
151
	rq->mq_ctx = ctx;
152
	req_set_op_attrs(rq, op, op_flags);
153 154 155 156 157 158
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
159
	rq->start_time = jiffies;
160 161
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
162
	set_start_time_ns(rq);
163 164 165 166 167 168 169 170 171 172
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

173 174
	rq->cmd = rq->__cmd;

175 176 177 178 179 180
	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
181 182
	rq->timeout = 0;

183 184 185 186
	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

187
	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
188 189
}

190
static struct request *
191
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
192 193 194 195
{
	struct request *rq;
	unsigned int tag;

196
	tag = blk_mq_get_tag(data);
197
	if (tag != BLK_MQ_TAG_FAIL) {
198
		rq = data->hctx->tags->rqs[tag];
199

200
		if (blk_mq_tag_busy(data->hctx)) {
201
			rq->cmd_flags = REQ_MQ_INFLIGHT;
202
			atomic_inc(&data->hctx->nr_active);
203 204 205
		}

		rq->tag = tag;
206
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
207 208 209 210 211 212
		return rq;
	}

	return NULL;
}

213 214
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
215
{
216 217
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
218
	struct request *rq;
219
	struct blk_mq_alloc_data alloc_data;
220
	int ret;
221

222
	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
223 224
	if (ret)
		return ERR_PTR(ret);
225

226
	ctx = blk_mq_get_ctx(q);
C
Christoph Hellwig 已提交
227
	hctx = blk_mq_map_queue(q, ctx->cpu);
228
	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
229
	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
230
	blk_mq_put_ctx(ctx);
231

K
Keith Busch 已提交
232
	if (!rq) {
233
		blk_queue_exit(q);
234
		return ERR_PTR(-EWOULDBLOCK);
K
Keith Busch 已提交
235
	}
236 237 238 239

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
240 241
	return rq;
}
242
EXPORT_SYMBOL(blk_mq_alloc_request);
243

M
Ming Lin 已提交
244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

269 270 271 272
	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
M
Ming Lin 已提交
273
	hctx = q->queue_hw_ctx[hctx_idx];
274 275 276 277
	if (!blk_mq_hw_queue_mapped(hctx)) {
		ret = -EXDEV;
		goto out_queue_exit;
	}
M
Ming Lin 已提交
278 279 280 281 282
	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));

	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
	if (!rq) {
283 284
		ret = -EWOULDBLOCK;
		goto out_queue_exit;
M
Ming Lin 已提交
285 286 287
	}

	return rq;
288 289 290 291

out_queue_exit:
	blk_queue_exit(q);
	return ERR_PTR(ret);
M
Ming Lin 已提交
292 293 294
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

295 296 297 298 299 300
static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

301 302
	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);
303
	rq->cmd_flags = 0;
304

305
	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
306
	blk_mq_put_tag(hctx, ctx, tag);
307
	blk_queue_exit(q);
308 309
}

310
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
311 312 313 314 315
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_free_request(hctx, ctx, rq);
316 317 318 319 320 321

}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
C
Christoph Hellwig 已提交
322
	blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
323
}
J
Jens Axboe 已提交
324
EXPORT_SYMBOL_GPL(blk_mq_free_request);
325

326
inline void __blk_mq_end_request(struct request *rq, int error)
327
{
M
Ming Lei 已提交
328 329
	blk_account_io_done(rq);

C
Christoph Hellwig 已提交
330
	if (rq->end_io) {
331
		rq->end_io(rq, error);
C
Christoph Hellwig 已提交
332 333 334
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
335
		blk_mq_free_request(rq);
C
Christoph Hellwig 已提交
336
	}
337
}
338
EXPORT_SYMBOL(__blk_mq_end_request);
339

340
void blk_mq_end_request(struct request *rq, int error)
341 342 343
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
344
	__blk_mq_end_request(rq, error);
345
}
346
EXPORT_SYMBOL(blk_mq_end_request);
347

348
static void __blk_mq_complete_request_remote(void *data)
349
{
350
	struct request *rq = data;
351

352
	rq->q->softirq_done_fn(rq);
353 354
}

355
static void blk_mq_ipi_complete_request(struct request *rq)
356 357
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
C
Christoph Hellwig 已提交
358
	bool shared = false;
359 360
	int cpu;

C
Christoph Hellwig 已提交
361
	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
362 363 364
		rq->q->softirq_done_fn(rq);
		return;
	}
365 366

	cpu = get_cpu();
C
Christoph Hellwig 已提交
367 368 369 370
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
371
		rq->csd.func = __blk_mq_complete_request_remote;
372 373
		rq->csd.info = rq;
		rq->csd.flags = 0;
374
		smp_call_function_single_async(ctx->cpu, &rq->csd);
375
	} else {
376
		rq->q->softirq_done_fn(rq);
377
	}
378 379
	put_cpu();
}
380

381
static void __blk_mq_complete_request(struct request *rq)
382 383 384 385
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
386
		blk_mq_end_request(rq, rq->errors);
387 388 389 390
	else
		blk_mq_ipi_complete_request(rq);
}

391 392 393 394 395 396 397 398
/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through a IPI handler.
 **/
399
void blk_mq_complete_request(struct request *rq, int error)
400
{
401 402 403
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
404
		return;
405 406
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
407
		__blk_mq_complete_request(rq);
408
	}
409 410
}
EXPORT_SYMBOL(blk_mq_complete_request);
411

412 413 414 415 416 417
int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

418
void blk_mq_start_request(struct request *rq)
419 420 421 422 423
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

C
Christoph Hellwig 已提交
424
	rq->resid_len = blk_rq_bytes(rq);
C
Christoph Hellwig 已提交
425 426
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
C
Christoph Hellwig 已提交
427

428
	blk_add_timer(rq);
429

430 431 432 433 434 435
	/*
	 * Ensure that ->deadline is visible before set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

436 437 438 439 440 441
	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
442 443 444 445
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
446 447 448 449 450 451 452 453 454

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
455
}
456
EXPORT_SYMBOL(blk_mq_start_request);
457

458
static void __blk_mq_requeue_request(struct request *rq)
459 460 461 462
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
463

464 465 466 467
	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
468 469
}

470 471 472 473 474
void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
475
	blk_mq_add_to_requeue_list(rq, true);
476 477 478
}
EXPORT_SYMBOL(blk_mq_requeue_request);

479 480 481
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
482
		container_of(work, struct request_queue, requeue_work.work);
483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

506 507 508 509 510
	/*
	 * Use the start variant of queue running here, so that running
	 * the requeue work will kick stopped queues.
	 */
	blk_mq_start_hw_queues(q);
511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertation from the workqueue.
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

535 536
void blk_mq_cancel_requeue_work(struct request_queue *q)
{
537
	cancel_delayed_work_sync(&q->requeue_work);
538 539 540
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);

541 542
void blk_mq_kick_requeue_list(struct request_queue *q)
{
543
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
544 545 546
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

547 548 549 550 551 552 553 554
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574
void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

575 576
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
577 578
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
579
		return tags->rqs[tag];
580
	}
581 582

	return NULL;
583 584 585
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

586
struct blk_mq_timeout_data {
587 588
	unsigned long next;
	unsigned int next_set;
589 590
};

591
void blk_mq_rq_timed_out(struct request *req, bool reserved)
592
{
593 594
	struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
595 596 597 598 599 600 601 602 603 604

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * we both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
605 606
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;
607

608
	if (ops->timeout)
609
		ret = ops->timeout(req, reserved);
610 611 612 613 614 615 616 617 618 619 620 621 622 623 624

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
625
}
626

627 628 629 630
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;
631

632 633 634 635 636
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		/*
		 * If a request wasn't started before the queue was
		 * marked dying, kill it here or it'll go unnoticed.
		 */
637 638 639 640
		if (unlikely(blk_queue_dying(rq->q))) {
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
		}
641
		return;
642
	}
643

644 645
	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
646
			blk_mq_rq_timed_out(rq, reserved);
647 648 649 650
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
651 652
}

653
static void blk_mq_timeout_work(struct work_struct *work)
654
{
655 656
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
657 658 659 660 661
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;
662

663 664 665 666 667 668 669 670 671 672 673 674 675 676
	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_mq_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
677 678
		return;

679
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
680

681 682 683
	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
684
	} else {
685 686
		struct blk_mq_hw_ctx *hctx;

687 688 689 690 691
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
692
	}
693
	blk_queue_exit(q);
694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752
struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

753 754 755 756 757 758
/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
759 760 761 762
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};
763

764
	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
765 766
}

767 768 769 770
static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;
771

772
	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
773 774
}

775 776 777 778 779 780 781 782 783 784 785
/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
786 787
	LIST_HEAD(driver_list);
	struct list_head *dptr;
788
	int queued;
789

790
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
791 792
		return;

793 794 795
	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

796 797 798 799 800
	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
801
	flush_busy_ctxs(hctx, &rq_list);
802 803 804 805 806 807 808 809 810 811 812 813

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

814 815 816 817 818 819
	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

820 821 822
	/*
	 * Now process all the entries, sending them to the driver.
	 */
823
	queued = 0;
824
	while (!list_empty(&rq_list)) {
825
		struct blk_mq_queue_data bd;
826 827 828 829 830
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

831 832 833 834 835
		bd.rq = rq;
		bd.list = dptr;
		bd.last = list_empty(&rq_list);

		ret = q->mq_ops->queue_rq(hctx, &bd);
836 837 838
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
839
			break;
840 841
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
842
			__blk_mq_requeue_request(rq);
843 844 845 846
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
847
			rq->errors = -EIO;
848
			blk_mq_end_request(rq, rq->errors);
849 850 851 852 853
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;
854 855 856 857 858 859 860

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && rq_list.next != rq_list.prev)
			dptr = &driver_list;
861 862
	}

863
	hctx->dispatched[queued_to_index(queued)]++;
864 865 866 867 868 869 870 871 872

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
873 874 875 876 877 878 879 880 881 882
		/*
		 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
		 * it's possible the queue is stopped and restarted again
		 * before this. Queue restart will dispatch requests. And since
		 * requests in rq_list aren't added into hctx->dispatch yet,
		 * the requests in rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit
		 **/
		blk_mq_run_hw_queue(hctx, true);
883 884 885
	}
}

886 887 888 889 890 891 892 893
/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
894 895
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;
896 897

	if (--hctx->next_cpu_batch <= 0) {
898
		int cpu = hctx->next_cpu, next_cpu;
899 900 901 902 903 904 905

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
906 907

		return cpu;
908 909
	}

910
	return hctx->next_cpu;
911 912
}

913 914
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
915 916
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
	    !blk_mq_hw_queue_mapped(hctx)))
917 918
		return;

919
	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
920 921
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
922
			__blk_mq_run_hw_queue(hctx);
923
			put_cpu();
924 925
			return;
		}
926

927
		put_cpu();
928
	}
929

930
	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
931 932
}

933
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
934 935 936 937 938 939 940
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
941
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
942 943
			continue;

944
		blk_mq_run_hw_queue(hctx, async);
945 946
	}
}
947
EXPORT_SYMBOL(blk_mq_run_hw_queues);
948 949 950

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
951
	cancel_work(&hctx->run_work);
952
	cancel_delayed_work(&hctx->delay_work);
953 954 955 956
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

957 958 959 960 961 962 963 964 965 966
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

967 968 969
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
970

971
	blk_mq_run_hw_queue(hctx, false);
972 973 974
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

975 976 977 978 979 980 981 982 983 984
void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

985
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
986 987 988 989 990 991 992 993 994
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
995
		blk_mq_run_hw_queue(hctx, async);
996 997 998 999
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

1000
static void blk_mq_run_work_fn(struct work_struct *work)
1001 1002 1003
{
	struct blk_mq_hw_ctx *hctx;

1004
	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
1005

1006 1007 1008
	__blk_mq_run_hw_queue(hctx);
}

1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
1021 1022
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;
1023

1024 1025
	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->delay_work, msecs_to_jiffies(msecs));
1026 1027 1028
}
EXPORT_SYMBOL(blk_mq_delay_queue);

1029 1030 1031
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
1032
{
J
Jens Axboe 已提交
1033 1034
	struct blk_mq_ctx *ctx = rq->mq_ctx;

1035 1036
	trace_block_rq_insert(hctx->queue, rq);

1037 1038 1039 1040
	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
1041
}
1042

1043 1044 1045 1046 1047
static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

J
Jens Axboe 已提交
1048
	__blk_mq_insert_req_list(hctx, rq, at_head);
1049 1050 1051
	blk_mq_hctx_mark_pending(hctx, ctx);
}

1052
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
J
Jens Axboe 已提交
1053
			   bool async)
1054
{
J
Jens Axboe 已提交
1055
	struct blk_mq_ctx *ctx = rq->mq_ctx;
1056
	struct request_queue *q = rq->q;
C
Christoph Hellwig 已提交
1057
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1058

1059 1060 1061
	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);
1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

static void blk_mq_insert_requests(struct request_queue *q,
				     struct blk_mq_ctx *ctx,
				     struct list_head *list,
				     int depth,
				     bool from_schedule)

{
C
Christoph Hellwig 已提交
1074
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086

	trace_block_unplug(q, depth, !from_schedule);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
J
Jens Axboe 已提交
1087
		BUG_ON(rq->mq_ctx != ctx);
1088
		list_del_init(&rq->queuelist);
J
Jens Axboe 已提交
1089
		__blk_mq_insert_req_list(hctx, rq, false);
1090
	}
1091
	blk_mq_hctx_mark_pending(hctx, ctx);
1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);
1157

1158
	blk_account_io_start(rq, 1);
1159 1160
}

1161 1162 1163 1164 1165 1166
static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

1167 1168 1169
static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
1170
{
1171
	if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
1172 1173 1174 1175 1176 1177 1178
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
1179 1180
		struct request_queue *q = hctx->queue;

1181 1182 1183 1184 1185
		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}
1186

1187 1188 1189
		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
1190
	}
1191
}
1192

1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204
struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
1205 1206
	int op = bio_data_dir(bio);
	int op_flags = 0;
1207
	struct blk_mq_alloc_data alloc_data;
1208

1209
	blk_queue_enter_live(q);
1210
	ctx = blk_mq_get_ctx(q);
C
Christoph Hellwig 已提交
1211
	hctx = blk_mq_map_queue(q, ctx->cpu);
1212

J
Jens Axboe 已提交
1213
	if (rw_is_sync(bio_op(bio), bio->bi_opf))
1214
		op_flags |= REQ_SYNC;
1215

1216
	trace_block_getrq(q, bio, op);
1217
	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
1218
	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
1219 1220

	hctx->queued++;
1221 1222 1223 1224 1225
	data->hctx = hctx;
	data->ctx = ctx;
	return rq;
}

1226
static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
1227 1228 1229
{
	int ret;
	struct request_queue *q = rq->q;
C
Christoph Hellwig 已提交
1230
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
1231 1232 1233 1234 1235
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.list = NULL,
		.last = 1
	};
1236
	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
1237 1238 1239 1240 1241 1242 1243

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
1244 1245
	if (ret == BLK_MQ_RQ_QUEUE_OK) {
		*cookie = new_cookie;
1246
		return 0;
1247
	}
1248

1249 1250 1251 1252 1253 1254 1255
	__blk_mq_requeue_request(rq);

	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
		*cookie = BLK_QC_T_NONE;
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
		return 0;
1256
	}
1257 1258

	return -1;
1259 1260
}

1261 1262 1263 1264 1265
/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
1266
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1267
{
J
Jens Axboe 已提交
1268 1269
	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1270 1271
	struct blk_map_ctx data;
	struct request *rq;
1272 1273
	unsigned int request_count = 0;
	struct blk_plug *plug;
1274
	struct request *same_queue_rq = NULL;
1275
	blk_qc_t cookie;
1276 1277 1278 1279

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1280
		bio_io_error(bio);
1281
		return BLK_QC_T_NONE;
1282 1283
	}

1284 1285
	blk_queue_split(q, &bio, q->bio_split);

1286 1287 1288
	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;
1289

1290 1291
	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
1292
		return BLK_QC_T_NONE;
1293

1294
	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1295 1296 1297 1298 1299 1300 1301

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

1302
	plug = current->plug;
1303 1304 1305 1306 1307
	/*
	 * If the driver supports defer issued based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
1308 1309 1310
	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct request *old_rq = NULL;
1311 1312 1313 1314

		blk_mq_bio_to_request(rq, bio);

		/*
1315
		 * We do limited pluging. If the bio can be merged, do that.
1316 1317
		 * Otherwise the existing request in the plug list will be
		 * issued. So the plug list will have one request at most
1318
		 */
1319
		if (plug) {
1320 1321
			/*
			 * The plug list might get flushed before this. If that
1322 1323 1324
			 * happens, same_queue_rq is invalid and plug list is
			 * empty
			 */
1325 1326
			if (same_queue_rq && !list_empty(&plug->mq_list)) {
				old_rq = same_queue_rq;
1327
				list_del_init(&old_rq->queuelist);
1328
			}
1329 1330 1331 1332 1333
			list_add_tail(&rq->queuelist, &plug->mq_list);
		} else /* is_sync */
			old_rq = rq;
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
1334 1335 1336
			goto done;
		if (!blk_mq_direct_issue_request(old_rq, &cookie))
			goto done;
1337
		blk_mq_insert_request(old_rq, false, true, true);
1338
		goto done;
1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
	blk_mq_put_ctx(data.ctx);
1352 1353
done:
	return cookie;
1354 1355 1356 1357 1358 1359
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
1360
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1361
{
J
Jens Axboe 已提交
1362 1363
	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1364 1365
	struct blk_plug *plug;
	unsigned int request_count = 0;
1366 1367
	struct blk_map_ctx data;
	struct request *rq;
1368
	blk_qc_t cookie;
1369 1370 1371 1372

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1373
		bio_io_error(bio);
1374
		return BLK_QC_T_NONE;
1375 1376
	}

1377 1378
	blk_queue_split(q, &bio, q->bio_split);

1379 1380 1381 1382 1383
	if (!is_flush_fua && !blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
			return BLK_QC_T_NONE;
	} else
		request_count = blk_plug_queued_count(q);
1384 1385

	rq = blk_mq_map_request(q, bio, &data);
1386
	if (unlikely(!rq))
1387
		return BLK_QC_T_NONE;
1388

1389
	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * A task plug currently exists. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
1402 1403 1404
	plug = current->plug;
	if (plug) {
		blk_mq_bio_to_request(rq, bio);
M
Ming Lei 已提交
1405
		if (!request_count)
1406
			trace_block_plug(q);
1407 1408 1409 1410

		blk_mq_put_ctx(data.ctx);

		if (request_count >= BLK_MAX_REQUEST_COUNT) {
1411 1412
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
1413
		}
1414

1415
		list_add_tail(&rq->queuelist, &plug->mq_list);
1416
		return cookie;
1417 1418
	}

1419 1420 1421 1422 1423 1424 1425 1426 1427
	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1428 1429
	}

1430
	blk_mq_put_ctx(data.ctx);
1431
	return cookie;
1432 1433
}

1434 1435
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
1436
{
1437
	struct page *page;
1438

1439
	if (tags->rqs && set->ops->exit_request) {
1440
		int i;
1441

1442 1443
		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
1444
				continue;
1445 1446
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
1447
			tags->rqs[i] = NULL;
1448
		}
1449 1450
	}

1451 1452
	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
1453
		list_del_init(&page->lru);
1454 1455 1456 1457 1458
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
1459 1460 1461
		__free_pages(page, page->private);
	}

1462
	kfree(tags->rqs);
1463

1464
	blk_mq_free_tags(tags);
1465 1466 1467 1468
}

static size_t order_to_size(unsigned int order)
{
1469
	return (size_t)PAGE_SIZE << order;
1470 1471
}

1472 1473
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
1474
{
1475
	struct blk_mq_tags *tags;
1476 1477 1478
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

1479
	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
S
Shaohua Li 已提交
1480 1481
				set->numa_node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1482 1483
	if (!tags)
		return NULL;
1484

1485 1486
	INIT_LIST_HEAD(&tags->page_list);

1487 1488 1489
	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				 set->numa_node);
1490 1491 1492 1493
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}
1494 1495 1496 1497 1498

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
1499
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
1500
				cache_line_size());
1501
	left = rq_size * set->queue_depth;
1502

1503
	for (i = 0; i < set->queue_depth; ) {
1504 1505 1506 1507 1508
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

1509
		while (this_order && left < order_to_size(this_order - 1))
1510 1511 1512
			this_order--;

		do {
1513
			page = alloc_pages_node(set->numa_node,
1514
				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1515
				this_order);
1516 1517 1518 1519 1520 1521 1522 1523 1524
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
1525
			goto fail;
1526 1527

		page->private = this_order;
1528
		list_add_tail(&page->lru, &tags->page_list);
1529 1530

		p = page_address(page);
1531 1532 1533 1534 1535
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
1536
		entries_per_page = order_to_size(this_order) / rq_size;
1537
		to_do = min(entries_per_page, set->queue_depth - i);
1538 1539
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
1540 1541 1542 1543
			tags->rqs[i] = p;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
1544 1545
						set->numa_node)) {
					tags->rqs[i] = NULL;
1546
					goto fail;
1547
				}
1548 1549
			}

1550 1551 1552 1553
			p += rq_size;
			i++;
		}
	}
1554
	return tags;
1555

1556 1557 1558
fail:
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
1559 1560
}

J
Jens Axboe 已提交
1561 1562 1563 1564 1565
/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
1566
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1567
{
1568
	struct blk_mq_hw_ctx *hctx;
1569 1570 1571
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

1572
	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
J
Jens Axboe 已提交
1573
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1574 1575 1576 1577 1578 1579 1580 1581 1582

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
1583
		return 0;
1584

J
Jens Axboe 已提交
1585 1586 1587
	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);
1588 1589

	blk_mq_run_hw_queue(hctx, true);
1590
	return 0;
1591 1592
}

1593
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1594
{
1595 1596
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
1597 1598
}

1599
/* hctx->ctxs will be freed in queue's release handler */
1600 1601 1602 1603
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
1604 1605
	unsigned flush_start_tag = set->queue_depth;

1606 1607
	blk_mq_tag_idle(hctx);

1608 1609 1610 1611 1612
	if (set->ops->exit_request)
		set->ops->exit_request(set->driver_data,
				       hctx->fq->flush_rq, hctx_idx,
				       flush_start_tag + hctx_idx);

1613 1614 1615
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

1616
	blk_mq_remove_cpuhp(hctx);
1617
	blk_free_flush_queue(hctx->fq);
1618
	sbitmap_free(&hctx->ctx_map);
1619 1620
}

M
Ming Lei 已提交
1621 1622 1623 1624 1625 1626 1627 1628 1629
static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
1630
		blk_mq_exit_hctx(q, set, hctx, i);
M
Ming Lei 已提交
1631 1632 1633 1634 1635 1636 1637 1638 1639
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

1640
	queue_for_each_hw_ctx(q, hctx, i)
M
Ming Lei 已提交
1641 1642 1643
		free_cpumask_var(hctx->cpumask);
}

1644 1645 1646
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1647
{
1648
	int node;
1649
	unsigned flush_start_tag = set->queue_depth;
1650 1651 1652 1653 1654

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

1655
	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
1656 1657 1658 1659 1660
	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
1661
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
1662

1663
	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
1664 1665

	hctx->tags = set->tags[hctx_idx];
1666 1667

	/*
1668 1669
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
1670
	 */
1671 1672 1673 1674
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;
1675

1676 1677
	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
1678
		goto free_ctxs;
1679

1680
	hctx->nr_ctx = 0;
1681

1682 1683 1684
	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;
1685

1686 1687 1688
	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;
1689

1690 1691 1692 1693 1694
	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data,
				   hctx->fq->flush_rq, hctx_idx,
				   flush_start_tag + hctx_idx, node))
		goto free_fq;
1695

1696
	return 0;
1697

1698 1699 1700 1701 1702
 free_fq:
	kfree(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
1703
 free_bitmap:
1704
	sbitmap_free(&hctx->ctx_map);
1705 1706 1707
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
1708
	blk_mq_remove_cpuhp(hctx);
1709 1710
	return -1;
}
1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		memset(__ctx, 0, sizeof(*__ctx));
		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
			continue;

C
Christoph Hellwig 已提交
1731
		hctx = blk_mq_map_queue(q, i);
1732

1733 1734 1735 1736 1737
		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1738
			hctx->numa_node = local_memory_node(cpu_to_node(i));
1739 1740 1741
	}
}

1742 1743
static void blk_mq_map_swqueue(struct request_queue *q,
			       const struct cpumask *online_mask)
1744 1745 1746 1747
{
	unsigned int i;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
M
Ming Lei 已提交
1748
	struct blk_mq_tag_set *set = q->tag_set;
1749

1750 1751 1752 1753 1754
	/*
	 * Avoid others reading imcomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

1755
	queue_for_each_hw_ctx(q, hctx, i) {
1756
		cpumask_clear(hctx->cpumask);
1757 1758 1759 1760 1761 1762
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues
	 */
1763
	for_each_possible_cpu(i) {
1764
		/* If the cpu isn't online, the cpu is mapped to first hctx */
1765
		if (!cpumask_test_cpu(i, online_mask))
1766 1767
			continue;

1768
		ctx = per_cpu_ptr(q->queue_ctx, i);
C
Christoph Hellwig 已提交
1769
		hctx = blk_mq_map_queue(q, i);
K
Keith Busch 已提交
1770

1771
		cpumask_set_cpu(i, hctx->cpumask);
1772 1773 1774
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}
1775

1776 1777
	mutex_unlock(&q->sysfs_lock);

1778
	queue_for_each_hw_ctx(q, hctx, i) {
1779
		/*
1780 1781
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
1782 1783 1784 1785 1786 1787
		 */
		if (!hctx->nr_ctx) {
			if (set->tags[i]) {
				blk_mq_free_rq_map(set, set->tags[i], i);
				set->tags[i] = NULL;
			}
M
Ming Lei 已提交
1788
			hctx->tags = NULL;
1789 1790 1791
			continue;
		}

M
Ming Lei 已提交
1792 1793 1794 1795 1796 1797
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[i])
			set->tags[i] = blk_mq_init_rq_map(set, i);
		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

1798 1799 1800 1801 1802
		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
1803
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
1804

1805 1806 1807
		/*
		 * Initialize batch roundrobin counts
		 */
1808 1809 1810
		hctx->next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
1811 1812
}

1813
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
1814 1815 1816 1817
{
	struct blk_mq_hw_ctx *hctx;
	int i;

1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828
	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
{
	struct request_queue *q;
1829 1830 1831

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
1832
		queue_set_hctx_shared(q, shared);
1833 1834 1835 1836 1837 1838 1839 1840 1841 1842
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_init(&q->tag_set_list);
1843 1844 1845 1846 1847 1848
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
1849 1850 1851 1852 1853 1854 1855 1856 1857
	mutex_unlock(&set->tag_list_lock);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);
1858 1859 1860 1861 1862 1863 1864 1865 1866

	/* Check to see if we're transitioning to shared (from 1 to 2 queues). */
	if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
1867
	list_add_tail(&q->tag_set_list, &set->tag_list);
1868

1869 1870 1871
	mutex_unlock(&set->tag_list_lock);
}

1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883
/*
 * It is the actual release handler for mq, but we do it from
 * request queue's release handler for avoiding use-after-free
 * and headache because q->mq_kobj shouldn't have been introduced,
 * but we can't group ctx/kctx kobj without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
1884 1885 1886 1887
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kfree(hctx->ctxs);
1888
		kfree(hctx);
1889
	}
1890

1891 1892
	q->mq_map = NULL;

1893 1894 1895 1896 1897 1898
	kfree(q->queue_hw_ctx);

	/* ctx kobj stays in queue_ctx */
	free_percpu(q->queue_ctx);
}

1899
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);

static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
						struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
						node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags) {
				blk_mq_free_rq_map(set, hctx->tags, j);
				set->tags[j] = NULL;
			}
			blk_mq_exit_hctx(q, set, hctx, j);
			free_cpumask_var(hctx->cpumask);
			kobject_put(&hctx->kobj);
			kfree(hctx->ctxs);
			kfree(hctx);
			hctxs[j] = NULL;

		}
	}
	q->nr_hw_queues = i;
	blk_mq_sysfs_register(q);
}

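/*
 * Initialize a pre-allocated request queue for blk-mq use: per-cpu software
 * contexts, hardware contexts, requeue handling and the make_request path
 * are all set up here.
 */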
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
						GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	if (q->nr_hw_queues > 1)
		blk_queue_make_request(q, blk_mq_make_request);
	else
		blk_queue_make_request(q, blk_sq_make_request);

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);

	get_online_cpus();
	mutex_lock(&all_q_mutex);

	list_add_tail(&q->all_q_node, &all_q_list);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q, cpu_online_mask);

	mutex_unlock(&all_q_mutex);
	put_online_cpus();

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

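/*
 * Tear down the blk-mq resources of a queue: drop it from the global and
 * tag set lists and release its hardware queues.
 */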
void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set	*set = q->tag_set;

	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blk_mq_del_queue_tag_set(q);

	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	blk_mq_free_hw_queues(q, set);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q,
				const struct cpumask *online_mask)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory; is it worth doing?)
	 */

	blk_mq_map_swqueue(q, online_mask);

	blk_mq_sysfs_register(q);
}

/*
 * New online cpumask which is going to be set in this hotplug event.
 * Declare this cpumask as global because cpu-hotplug operations are invoked
 * one by one and dynamically allocating it could fail.
 */
static struct cpumask cpuhp_online_new;

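/*
 * Freeze all queues, remap software to hardware contexts against the new
 * online mask, and unfreeze them again.
 */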
static void blk_mq_queue_reinit_work(void)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	/*
	 * We need to freeze and reinit all existing queues.  Freezing
	 * involves synchronous wait for an RCU grace period and doing it
	 * one by one may take a long time.  Start freezing all queues in
	 * one swoop and then wait for the completions so that freezing can
	 * take place in parallel.
	 */
	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_freeze_queue_start(q);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_mq_freeze_queue_wait(q);

		/*
		 * timeout handler can't touch hw queue during the
		 * reinitialization
		 */
		del_timer_sync(&q->timeout);
	}

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_queue_reinit(q, &cpuhp_online_new);

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_mq_unfreeze_queue(q);

	mutex_unlock(&all_q_mutex);
}

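/* A CPU went offline: remap all queues against the current online mask. */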
static int blk_mq_queue_reinit_dead(unsigned int cpu)
{
	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
	blk_mq_queue_reinit_work();
	return 0;
}

/*
 * Before a hot-added CPU starts handling requests, new mappings must be
 * established.  Otherwise, requests in its hw queue might never be
 * dispatched.
 *
 * For example, suppose there is a single hw queue (hctx) and two CPU queues
 * (ctx0 for CPU0, and ctx1 for CPU1).
 *
 * Now CPU1 has just been onlined and a request is inserted into
 * ctx1->rq_list, setting bit0 in the pending bitmap as ctx1->index_hw is
 * still zero.
 *
 * Then, while running the hw queue, flush_busy_ctxs() finds bit0 set in the
 * pending bitmap and tries to retrieve requests from hctx->ctxs[0]->rq_list.
 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list
 * is ignored.
 */
static int blk_mq_queue_reinit_prepare(unsigned int cpu)
{
	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
	cpumask_set_cpu(cpu, &cpuhp_online_new);
	blk_mq_queue_reinit_work();
	return 0;
}

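/*
 * Allocate one request map per hardware queue at the current queue depth,
 * unwinding on failure.
 */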
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++) {
		set->tags[i] = blk_mq_init_rq_map(set, i);
		if (!set->tags[i])
			goto out_unwind;
	}

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set, set->tags[i], i);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it is too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
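/*
 * Illustrative driver-side sketch (my_mq_ops and struct my_cmd are
 * hypothetical; the fields below are real struct blk_mq_tag_set members):
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 128;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		return PTR_ERR(q);
 *	}
 */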
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
			GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	if (set->ops->map_queues)
		ret = set->ops->map_queues(set);
	else
		ret = blk_mq_map_queues(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

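/*
 * Release the per-hw-queue request maps, the cpu-to-queue map and the tags
 * array of a tag set. Counterpart to blk_mq_alloc_tag_set().
 */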
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++) {
		if (set->tags[i])
			blk_mq_free_rq_map(set, set->tags[i], i);
	}

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

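/*
 * Adjust the tag depth of every hardware context of @q to @nr. Fails if @nr
 * exceeds the tag set's queue depth.
 */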
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set || nr > set->queue_depth)
		return -EINVAL;

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		ret = blk_mq_tag_update_depth(hctx->tags, nr);
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	return ret;
}

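/*
 * Change the number of hardware queues for every queue sharing this tag
 * set. All queues are frozen while hardware contexts are reallocated and
 * the software-to-hardware mapping is redone.
 */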
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	struct request_queue *q;

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);

	set->nr_hw_queues = nr_hw_queues;
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);

		if (q->nr_hw_queues > 1)
			blk_queue_make_request(q, blk_mq_make_request);
		else
			blk_queue_make_request(q, blk_sq_make_request);

		blk_mq_queue_reinit(q, cpu_online_mask);
	}

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

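/*
 * Holding all_q_mutex keeps the cpu-hotplug reinit work from remapping
 * queues while the caller manipulates them.
 */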
void blk_mq_disable_hotplug(void)
{
	mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
	mutex_unlock(&all_q_mutex);
}

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);

	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
				  blk_mq_queue_reinit_prepare,
				  blk_mq_queue_reinit_dead);
	return 0;
}
subsys_initcall(blk_mq_init);