/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}
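
/*
 * Worked example (illustrative only, not from the original source): a 4KiB
 * read has ddir == READ (0) and ilog2(4096) == 12, so it lands in bucket
 * 0 + 2 * (12 - 9) = 6, while the matching 4KiB write lands in bucket 7.
 */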

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	/*
	 * index[0] counts the specific partition that was asked for. index[1]
	 * counts the ones that are active on the whole device, so increment
	 * that if mi->part is indeed a partition, and not a whole device.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;
	if (mi->part->partno)
		mi->inflight[1]++;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}
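
/*
 * Example (illustrative only): when called for a partition such as sda1,
 * inflight[0] ends up counting requests in flight on sda1 itself, while
 * inflight[1] counts requests in flight on the whole sda device.
 */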

static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	if (!q->mq_ops)
		blk_drain_queue(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
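
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that needs to change queue or driver state with no requests in flight
 * typically brackets the update with a freeze/unfreeze pair:
 *
 *	blk_mq_freeze_queue(q);		// wait for all in-flight requests
 *	... update queue/driver data structures safely ...
 *	blk_mq_unfreeze_queue(q);	// allow new requests again
 */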

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback function from being invoked. Once this function returns, no
 * dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
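
/*
 * Usage sketch (illustrative only): a driver that must change state the
 * dispatch path reads, without freezing the whole queue, can quiesce
 * around the update:
 *
 *	blk_mq_quiesce_queue(q);	// no ->queue_rq() runs after this returns
 *	... flip driver state that dispatch paths look at ...
 *	blk_mq_unquiesce_queue(q);	// re-run queues to flush deferred requests
 */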

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];
	req_flags_t rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
			rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->rq_flags = rq_flags;
	rq->cpu = -1;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time_ns = ktime_get_ns();
	rq->io_start_time_ns = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;
	rq->__deadline = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
#endif

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	refcount_set(&rq->ref, 1);
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list. Don't include reserved tags in the
		 * limiting, as it isn't useful.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
		    !(data->flags & BLK_MQ_REQ_RESERVED))
			e->type->ops.mq.limit_depth(op, data);
	} else {
		blk_mq_tag_busy(data->hctx);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
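
/*
 * Usage sketch (illustrative only; blk_execute_rq() lives outside this file
 * and is shown purely as an assumption about the usual pattern): a driver
 * issuing an internal, bio-less command typically does something like
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... fill in driver-specific payload ...
 *	blk_execute_rq(q, NULL, rq, false);
 *	blk_mq_free_request(rq);
 */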

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

static void __blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	rq_qos_done(q, rq);

	if (blk_rq_rl(rq))
		blk_put_rl(blk_rq_rl(rq));

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	u64 now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
	}

	blk_account_io_done(rq, now);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!blk_mq_mark_complete(rq))
		return;
	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return;
	__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		rq->throtl_size = blk_rq_sectors(rq);
#endif
		rq->rq_flags |= RQF_STATS;
		rq_qos_issue(q, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	blk_add_timer(rq);
	WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	rq_qos_requeue(q, rq);

	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	req->rq_flags |= RQF_TIMED_OUT;
	if (req->q->mq_ops->timeout) {
		enum blk_eh_timer_return ret;

		ret = req->q->mq_ops->timeout(req, reserved);
		if (ret == BLK_EH_DONE)
			return;
		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
	}

	blk_add_timer(req);
}

static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
{
	unsigned long deadline;

	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
		return false;
	if (rq->rq_flags & RQF_TIMED_OUT)
		return false;

	deadline = blk_rq_deadline(rq);
	if (time_after_eq(jiffies, deadline))
		return true;

	if (*next == 0)
		*next = deadline;
	else if (time_after(*next, deadline))
		*next = deadline;
	return false;
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	unsigned long *next = priv;

	/*
	 * Just do a quick check if it is expired before locking the request in
	 * so we're not unnecessarily synchronizing across CPUs.
	 */
	if (!blk_mq_req_expired(rq, next))
		return;

	/*
	 * We have reason to believe the request may be expired. Take a
	 * reference on the request to lock this request lifetime into its
	 * currently allocated context to prevent it from being reallocated in
	 * the event the completion by-passes this timeout handler.
	 *
	 * If the reference was already released, then the driver beat the
	 * timeout handler to posting a natural completion.
	 */
	if (!refcount_inc_not_zero(&rq->ref))
		return;

	/*
	 * The request is now locked and cannot be reallocated underneath the
	 * timeout handler's processing. Re-verify this exact request is truly
	 * expired; if it is not expired, then the request was completed and
	 * reallocated as a new request.
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	unsigned long next = 0;
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);

	if (next != 0) {
		mod_timer(&q->timeout, next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_list))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
967
bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = BLK_MQ_REQ_NOWAIT,
	};
	bool shared;

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	shared = blk_mq_tag_busy(data.hctx);
	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (shared) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	spin_lock(&hctx->dispatch_wait_lock);
	list_del_init(&wait->entry);
	spin_unlock(&hctx->dispatch_wait_lock);

	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq);
	}

	wait = &hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;

	spin_lock_irq(&wq->lock);
	spin_lock(&hctx->dispatch_wait_lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq);
	if (!ret) {
		spin_unlock(&hctx->dispatch_wait_lock);
		spin_unlock_irq(&wq->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	list_del_init(&wait->entry);
	spin_unlock(&hctx->dispatch_wait_lock);
	spin_unlock_irq(&wq->lock);

	return true;
}

#define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
#define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
/*
 * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
 * - EWMA is one simple way to compute a running average value
 * - a weight (7/8 and 1/8) is applied so that it can decrease exponentially
 * - a factor of 4 is applied to avoid the result collapsing to 0; the exact
 *   factor doesn't matter much because EWMA decreases exponentially
 */
static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma;

	if (hctx->queue->elevator)
		return;

	ewma = hctx->dispatch_busy;

	if (!ewma && !busy)
		return;

	ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}
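
/*
 * Worked example (illustrative only): starting from dispatch_busy == 8, a
 * busy dispatch yields (8 * 7 + 16) / 8 == 9, while subsequent idle
 * dispatches decay it as 9 -> 7 -> 6 -> 5 -> ... back toward 0.
 */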

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

/*
 * Returns true if we did some work AND can potentially do more.
 */
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
			break;

		if (!blk_mq_get_driver_tag(rq)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(hctx, rq)) {
				blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);

		blk_mq_update_dispatch_busy(hctx, true);
		return false;
	} else
		blk_mq_update_dispatch_busy(hctx, false);

	/*
	 * If the host/device is unable to accept more work, inform the
	 * caller of that.
	 */
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		return false;

	return (queued + errors) != 0;
}

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but later it becomes online, then this warning is harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline, then the warning can't be
	 *   triggered, and we depend on blk-mq timeout handler to
	 *   handle dispatched requests to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx; this should only happen while handling the CPU DEAD case.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When queue is quiesced, we may be switching io scheduler, or
	 * updating nr_hw_queues, or other things, and we can't run queue
	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run) {
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by a driver to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
		return;

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/*
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	spin_lock(&hctx->lock);
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)

{
	struct request *rq;

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	list_for_each_entry(rq, list, queuelist) {
		BUG_ON(rq->mq_ctx != ctx);
		trace_block_rq_insert(hctx->queue, rq);
	}

	spin_lock(&ctx->lock);
	list_splice_tail_init(list, &ctx->rq_list);
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, !from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
								&ctx_list,
								from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, !from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
						from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));

	blk_account_io_start(rq, true);
}

static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	if (rq->tag != -1)
		return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);

	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}

static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    blk_qc_t *cookie)
{
	struct request_queue *q = rq->q;
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.last = true,
	};
	blk_qc_t new_cookie;
	blk_status_t ret;

	new_cookie = request_to_qc_t(hctx, rq);

	/*
	 * For OK queue, we are done. For error, caller may kill it.
	 * Any other error (busy), just add it to our list as we
	 * previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	switch (ret) {
	case BLK_STS_OK:
		blk_mq_update_dispatch_busy(hctx, false);
		*cookie = new_cookie;
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		blk_mq_update_dispatch_busy(hctx, true);
		__blk_mq_requeue_request(rq);
		break;
	default:
		blk_mq_update_dispatch_busy(hctx, false);
		*cookie = BLK_QC_T_NONE;
		break;
	}

	return ret;
}

static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
						bool bypass_insert)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;

	/*
	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
	 * so that the driver doesn't try to dispatch again.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
		bypass_insert = false;
		goto insert;
	}

	if (q->elevator && !bypass_insert)
		goto insert;

	if (!blk_mq_get_dispatch_budget(hctx))
		goto insert;

	if (!blk_mq_get_driver_tag(rq)) {
		blk_mq_put_dispatch_budget(hctx);
		goto insert;
	}

	return __blk_mq_issue_directly(hctx, rq, cookie);
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	blk_mq_request_bypass_insert(rq, run_queue);
	return BLK_STS_OK;
}

static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq, blk_qc_t *cookie)
{
	blk_status_t ret;
	int srcu_idx;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);

	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
		blk_mq_request_bypass_insert(rq, true);
	else if (ret != BLK_STS_OK)
		blk_mq_end_request(rq, ret);

	hctx_unlock(hctx, srcu_idx);
}

blk_status_t blk_mq_request_issue_directly(struct request *rq)
{
	blk_status_t ret;
	int srcu_idx;
	blk_qc_t unused_cookie;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	hctx_lock(hctx, &srcu_idx);
	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
	hctx_unlock(hctx, srcu_idx);

	return ret;
}

void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list)
{
	while (!list_empty(list)) {
		blk_status_t ret;
		struct request *rq = list_first_entry(list, struct request,
				queuelist);

		list_del_init(&rq->queuelist);
		ret = blk_mq_request_issue_directly(rq);
		if (ret != BLK_STS_OK) {
			if (ret == BLK_STS_RESOURCE ||
					ret == BLK_STS_DEV_RESOURCE) {
				blk_mq_request_bypass_insert(rq,
							list_empty(list));
				break;
			}
			blk_mq_end_request(rq, ret);
		}
	}
}

1810
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1811
{
1812
	const int is_sync = op_is_sync(bio->bi_opf);
1813
	const int is_flush_fua = op_is_flush(bio->bi_opf);
1814
	struct blk_mq_alloc_data data = { .flags = 0 };
1815
	struct request *rq;
1816
	unsigned int request_count = 0;
1817
	struct blk_plug *plug;
1818
	struct request *same_queue_rq = NULL;
1819
	blk_qc_t cookie;
1820 1821 1822

	blk_queue_bounce(q, &bio);

1823
	blk_queue_split(q, &bio);
1824

1825
	if (!bio_integrity_prep(bio))
1826
		return BLK_QC_T_NONE;
1827

1828 1829 1830
	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return BLK_QC_T_NONE;
1831

1832 1833 1834
	if (blk_mq_sched_bio_merge(q, bio))
		return BLK_QC_T_NONE;

1835
	rq_qos_throttle(q, bio, NULL);
J
Jens Axboe 已提交
1836

1837 1838
	trace_block_getrq(q, bio, bio->bi_opf);

1839
	rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
J
Jens Axboe 已提交
1840
	if (unlikely(!rq)) {
1841
		rq_qos_cleanup(q, bio);
1842 1843
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
1844
		return BLK_QC_T_NONE;
J
Jens Axboe 已提交
1845 1846
	}

1847
	rq_qos_track(q, rq, bio);
1848

1849
	cookie = request_to_qc_t(data.hctx, rq);
1850

1851
	plug = current->plug;
1852
	if (unlikely(is_flush_fua)) {
1853
		blk_mq_put_ctx(data.ctx);
1854
		blk_mq_bio_to_request(rq, bio);
1855 1856 1857 1858

		/* bypass scheduler for flush rq */
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(data.hctx, true);
1859
	} else if (plug && q->nr_hw_queues == 1) {
1860 1861
		struct request *last = NULL;

1862
		blk_mq_put_ctx(data.ctx);
1863
		blk_mq_bio_to_request(rq, bio);
1864 1865 1866 1867 1868 1869 1870

		/*
		 * @request_count may become stale because we may get
		 * scheduled out, so check the list again.
		 */
		if (list_empty(&plug->mq_list))
			request_count = 0;
		else if (blk_queue_nomerges(q))
			request_count = blk_plug_queued_count(q);

		if (!request_count)
			trace_block_plug(q);
		else
			last = list_entry_rq(plug->mq_list.prev);

		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}

		list_add_tail(&rq->queuelist, &plug->mq_list);
	} else if (plug && !blk_queue_nomerges(q)) {
		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued, so the plug list will hold one request at most.
		 * The plug list might get flushed before this. If that
		 * happens, the plug list is empty, and same_queue_rq is
		 * invalid.
		 */
		if (list_empty(&plug->mq_list))
			same_queue_rq = NULL;
		if (same_queue_rq)
			list_del_init(&same_queue_rq->queuelist);
		list_add_tail(&rq->queuelist, &plug->mq_list);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = blk_mq_map_queue(q,
					same_queue_rq->mq_ctx->cpu);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
					&cookie);
		}
	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
			!data.hctx->dispatch_busy)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
		blk_mq_sched_insert_request(rq, false, true, true);
	}

	return cookie;
}

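/*
 * Free the statically allocated requests backing @tags, giving the driver a
 * chance to tear down per-request data via ->exit_request() before the
 * backing pages are released.
 */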
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			struct request *rq = tags->static_rqs[i];

			if (!rq)
				continue;
			set->ops->exit_request(set, rq, hctx_idx);
			tags->static_rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
}

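/*
 * Allocate the tag set and request pointer arrays for one hardware queue on
 * the NUMA node it maps to.  The requests themselves are allocated later by
 * blk_mq_alloc_rqs().
 */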
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
{
	struct blk_mq_tags *tags;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
					GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		return NULL;
	}

	return tags;
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

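/* Let the driver set up its per-request data, then mark the request idle. */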
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			       unsigned int hctx_idx, int node)
{
	int ret;

	if (set->ops->init_request) {
		ret = set->ops->init_request(set, rq, hctx_idx, node);
		if (ret)
			return ret;
	}

	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
	return 0;
}

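/*
 * Allocate @depth requests for hardware queue @hctx_idx, packing them into
 * as few pages as possible and initializing each via blk_mq_init_request().
 */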
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth)
{
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;
	int node;

	node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	INIT_LIST_HEAD(&tags->page_list);

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * depth;

	for (i = 0; i < depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (this_order && left < order_to_size(this_order - 1))
			this_order--;

		do {
			page = alloc_pages_node(node,
				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			struct request *rq = p;

			tags->static_rqs[i] = rq;
			if (blk_mq_init_request(set, rq, hctx_idx, node)) {
				tags->static_rqs[i] = NULL;
				goto fail;
			}

			p += rq_size;
			i++;
		}
	}
	return 0;

fail:
	blk_mq_free_rqs(set, tags, hctx_idx);
	return -ENOMEM;
}

/*
 * 'cpu' is going away. splice any existing rq_list entries from this
 * software queue to the hw queue dispatch list, and ensure that it
 * gets run.
 */
static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
	ctx = __blk_mq_get_ctx(hctx->queue, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return 0;

	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	return 0;
}

static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
					    &hctx->cpuhp_dead);
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_debugfs_unregister_hctx(hctx);

	if (blk_mq_hw_queue_mapped(hctx))
		blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);

	blk_mq_remove_cpuhp(hctx);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

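/*
 * Set up one hardware queue: dispatch list, pending-work bitmap, per-cpu
 * ctx array, flush queue and driver private state, plus the CPU hotplug
 * notifier used to drain the queue when a CPU goes away.
 */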
static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;

	cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
			      node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	spin_lock_init(&hctx->dispatch_wait_lock);
	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	INIT_LIST_HEAD(&hctx->dispatch_wait.entry);

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;

	if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
		goto free_fq;

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		init_srcu_struct(hctx->srcu);

	blk_mq_debugfs_register_hctx(q, hctx);

	return 0;

 free_fq:
	kfree(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	sbitmap_free(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_remove_cpuhp(hctx);
	return -1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		hctx = blk_mq_map_queue(q, i);
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = local_memory_node(cpu_to_node(i));
	}
}

static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
{
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
				set->queue_depth);
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	set->tags[hctx_idx] = NULL;
	return false;
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	if (set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
	}
}

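/*
 * (Re)build the mapping between software queues (ctxs) and hardware queues
 * based on q->mq_map, allocating tags for newly used hctxs and releasing
 * them for hctxs that end up with no software queues.
 */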
static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i, hctx_idx;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct blk_mq_tag_set *set = q->tag_set;

	/*
	 * Avoid others reading incomplete hctx->cpumask through sysfs
	 */
	mutex_lock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
		hctx->dispatch_from = NULL;
	}

	/*
	 * Map software to hardware queues.
	 *
	 * If the cpu isn't present, the cpu is mapped to the first hctx.
	 */
	for_each_possible_cpu(i) {
		hctx_idx = q->mq_map[i];
		/* unmapped hw queue can be remapped after CPU topo changed */
		if (!set->tags[hctx_idx] &&
		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
			/*
			 * If tags initialization fails for some hctx,
			 * that hctx won't be brought online.  In this
			 * case, remap the current ctx to hctx[0], which
			 * is guaranteed to always have tags allocated.
			 */
			q->mq_map[i] = 0;
		}

		ctx = per_cpu_ptr(q->queue_ctx, i);
		hctx = blk_mq_map_queue(q, i);

		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	mutex_unlock(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries.
		 */
		if (!hctx->nr_ctx) {
			/* Never unmap queue 0.  We need it as a
			 * fallback in case a new remap fails
			 * allocation
			 */
			if (i && set->tags[i])
				blk_mq_free_map_and_requests(set, i);

			hctx->tags = NULL;
			continue;
		}

		hctx->tags = set->tags[i];
		WARN_ON(!hctx->tags);

		/*
		 * Set the map size to the number of mapped software queues.
		 * This is more accurate and more efficient than looping
		 * over all possibly mapped software queues.
		 */
		sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

/*
 * Caller needs to ensure that we're either frozen/quiesced, or that
 * the queue isn't live yet.
 */
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (shared)
			hctx->flags |= BLK_MQ_F_TAG_SHARED;
		else
			hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
	}
}

static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
					bool shared)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);
		queue_set_hctx_shared(q, shared);
		blk_mq_unfreeze_queue(q);
	}
}

static void blk_mq_del_queue_tag_set(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);
	if (list_is_singular(&set->tag_list)) {
		/* just transitioned to unshared */
		set->flags &= ~BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, false);
	}
	mutex_unlock(&set->tag_list_lock);
	INIT_LIST_HEAD(&q->tag_set_list);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
				     struct request_queue *q)
{
	q->tag_set = set;

	mutex_lock(&set->tag_list_lock);

	/*
	 * Check to see if we're transitioning to shared (from 1 to 2 queues).
	 */
	if (!list_empty(&set->tag_list) &&
	    !(set->flags & BLK_MQ_F_TAG_SHARED)) {
		set->flags |= BLK_MQ_F_TAG_SHARED;
		/* update existing queue */
		blk_mq_update_tag_set_depth(set, true);
	}
	if (set->flags & BLK_MQ_F_TAG_SHARED)
		queue_set_hctx_shared(q, true);
	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);

	mutex_unlock(&set->tag_list_lock);
}

/*
 * It is the actual release handler for mq, but we do it from
 * request queue's release handler for avoiding use-after-free
 * and headache because q->mq_kobj shouldn't have been introduced,
 * but we can't group ctx/kctx kobj without it.
 */
void blk_mq_release(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/* hctx kobj stays in hctx */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx)
			continue;
		kobject_put(&hctx->kobj);
	}

	q->mq_map = NULL;

	kfree(q->queue_hw_ctx);

	/*
	 * release .mq_kobj and sw queue's kobject now because
	 * both share lifetime with request queue.
	 */
	blk_mq_sysfs_deinit(q);

	free_percpu(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);

	q = blk_mq_init_allocated_queue(set, uninit_q);
	if (IS_ERR(q))
		blk_cleanup_queue(uninit_q);

	return q;
}
EXPORT_SYMBOL(blk_mq_init_queue);

static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
	int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);

	BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
			   __alignof__(struct blk_mq_hw_ctx)) !=
		     sizeof(struct blk_mq_hw_ctx));

	if (tag_set->flags & BLK_MQ_F_BLOCKING)
		hw_ctx_size += sizeof(struct srcu_struct);

	return hw_ctx_size;
}

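/*
 * Allocate and initialize the hardware queue structures for @q up to
 * set->nr_hw_queues, and tear down any hctxs beyond the new count.
 */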
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
						struct request_queue *q)
{
	int i, j;
	struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;

	blk_mq_sysfs_unregister(q);

	/* protect against switching io scheduler  */
	mutex_lock(&q->sysfs_lock);
	for (i = 0; i < set->nr_hw_queues; i++) {
		int node;

		if (hctxs[i])
			continue;

		node = blk_mq_hw_queue_to_node(q->mq_map, i);
		hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
					GFP_KERNEL, node);
		if (!hctxs[i])
			break;

		if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
						node)) {
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}

		atomic_set(&hctxs[i]->nr_active, 0);
		hctxs[i]->numa_node = node;
		hctxs[i]->queue_num = i;

		if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
			free_cpumask_var(hctxs[i]->cpumask);
			kfree(hctxs[i]);
			hctxs[i] = NULL;
			break;
		}
		blk_mq_hctx_kobj_init(hctxs[i]);
	}
	for (j = i; j < q->nr_hw_queues; j++) {
		struct blk_mq_hw_ctx *hctx = hctxs[j];

		if (hctx) {
			if (hctx->tags)
				blk_mq_free_map_and_requests(set, j);
			blk_mq_exit_hctx(q, set, hctx, j);
			kobject_put(&hctx->kobj);
			hctxs[j] = NULL;

		}
	}
	q->nr_hw_queues = i;
	mutex_unlock(&q->sysfs_lock);
	blk_mq_sysfs_register(q);
}

struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
						  struct request_queue *q)
{
	/* mark the queue as mq asap */
	q->mq_ops = set->ops;

	q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
					     blk_mq_poll_stats_bkt,
					     BLK_MQ_POLL_STATS_BKTS, q);
	if (!q->poll_cb)
		goto err_exit;

	q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
	if (!q->queue_ctx)
		goto err_exit;

	/* init q->mq_kobj and sw queues' kobjects */
	blk_mq_sysfs_init(q);

	q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
						GFP_KERNEL, set->numa_node);
	if (!q->queue_hw_ctx)
		goto err_percpu;

	q->mq_map = set->mq_map;

	blk_mq_realloc_hw_ctxs(set, q);
	if (!q->nr_hw_queues)
		goto err_hctxs;

	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);

	q->nr_queues = nr_cpu_ids;

	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

	if (!(set->flags & BLK_MQ_F_SG_MERGE))
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	q->sg_reserved_size = INT_MAX;

	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);
	if (q->mq_ops->poll)
		q->poll_fn = blk_mq_poll;

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->nr_requests = set->queue_depth;

	/*
	 * Default to classic polling
	 */
	q->poll_nsec = -1;

	if (set->ops->complete)
		blk_queue_softirq_done(q, set->ops->complete);

	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
		int ret;

		ret = elevator_init_mq(q);
		if (ret)
			return ERR_PTR(ret);
	}

	return q;

err_hctxs:
	kfree(q->queue_hw_ctx);
err_percpu:
	free_percpu(q->queue_ctx);
err_exit:
	q->mq_ops = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);

void blk_mq_free_queue(struct request_queue *q)
{
	struct blk_mq_tag_set	*set = q->tag_set;

	blk_mq_del_queue_tag_set(q);
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_debugfs_unregister_hctxs(q);
	blk_mq_sysfs_unregister(q);

	/*
	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
	 * we should change hctx numa_node according to the new topology (this
	 * involves freeing and re-allocating memory, worth doing?)
	 */
	blk_mq_map_swqueue(q);

	blk_mq_sysfs_register(q);
	blk_mq_debugfs_register_hctxs(q);
}

static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < set->nr_hw_queues; i++)
		if (!__blk_mq_alloc_rq_map(set, i))
			goto out_unwind;

	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_rq_map(set->tags[i]);

	return -ENOMEM;
}

/*
 * Allocate the request maps associated with this tag_set. Note that this
 * may reduce the depth asked for, if memory is tight. set->queue_depth
 * will be updated to reflect the allocated depth.
 */
static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
	unsigned int depth;
	int err;

	depth = set->queue_depth;
	do {
		err = __blk_mq_alloc_rq_maps(set);
		if (!err)
			break;

		set->queue_depth >>= 1;
		if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
			err = -ENOMEM;
			break;
		}
	} while (set->queue_depth);

	if (!set->queue_depth || err) {
		pr_err("blk-mq: failed to allocate request map\n");
		return -ENOMEM;
	}

	if (depth != set->queue_depth)
		pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
						depth, set->queue_depth);

	return 0;
}

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
	if (set->ops->map_queues) {
		/*
		 * transport .map_queues is usually done in the following
		 * way:
		 *
		 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
		 * 	mask = get_cpu_mask(queue)
		 * 	for_each_cpu(cpu, mask)
		 * 		set->mq_map[cpu] = queue;
		 * }
		 *
		 * When we need to remap, the table has to be cleared for
		 * killing stale mapping since one CPU may not be mapped
		 * to any hw queue.
		 */
		blk_mq_clear_mq_map(set);

		return set->ops->map_queues(set);
	} else
		return blk_mq_map_queues(set);
}

/*
 * Alloc a tag set to be associated with one or more request queues.
 * May fail with EINVAL for various error conditions. May adjust the
 * requested depth down, if it's too large. In that case, the set
 * value will be stored in set->queue_depth.
 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
	int ret;

	BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

	if (!set->nr_hw_queues)
		return -EINVAL;
	if (!set->queue_depth)
		return -EINVAL;
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
		return -EINVAL;

	if (!set->ops->queue_rq)
		return -EINVAL;

	if (!set->ops->get_budget ^ !set->ops->put_budget)
		return -EINVAL;

	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
		pr_info("blk-mq: reduced tag depth to %u\n",
			BLK_MQ_MAX_DEPTH);
		set->queue_depth = BLK_MQ_MAX_DEPTH;
	}

	/*
	 * If a crashdump is active, then we are potentially in a very
	 * memory constrained environment. Limit us to 1 queue and
	 * 64 tags to prevent using too much memory.
	 */
	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->queue_depth = min(64U, set->queue_depth);
	}
	/*
	 * There is no use for more h/w queues than cpus.
	 */
	if (set->nr_hw_queues > nr_cpu_ids)
		set->nr_hw_queues = nr_cpu_ids;

	set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
				 GFP_KERNEL, set->numa_node);
	if (!set->tags)
		return -ENOMEM;

	ret = -ENOMEM;
	set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
				   GFP_KERNEL, set->numa_node);
	if (!set->mq_map)
		goto out_free_tags;

	ret = blk_mq_update_queue_map(set);
	if (ret)
		goto out_free_mq_map;

	ret = blk_mq_alloc_rq_maps(set);
	if (ret)
		goto out_free_mq_map;

	mutex_init(&set->tag_list_lock);
	INIT_LIST_HEAD(&set->tag_list);

	return 0;

out_free_mq_map:
	kfree(set->mq_map);
	set->mq_map = NULL;
out_free_tags:
	kfree(set->tags);
	set->tags = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		blk_mq_free_map_and_requests(set, i);

	kfree(set->mq_map);
	set->mq_map = NULL;

	kfree(set->tags);
	set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

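/*
 * Update the queue depth of every hardware queue, adjusting either the
 * driver tags or the scheduler tags depending on whether an I/O scheduler
 * is attached.  The queue is frozen and quiesced around the update.
 */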
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;

	if (!set)
		return -EINVAL;

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	ret = 0;
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->tags)
			continue;
		/*
		 * If we're using an MQ scheduler, just update the scheduler
		 * queue depth. This is similar to what the old code would do.
		 */
		if (!hctx->sched_tags) {
			ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
							false);
		} else {
			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
							nr, true);
		}
		if (ret)
			break;
	}

	if (!ret)
		q->nr_requests = nr;

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return ret;
}

/*
 * request_queue and elevator_type pair.
 * It is just used by __blk_mq_update_nr_hw_queues to cache
 * the elevator_type associated with a request_queue.
 */
struct blk_mq_qe_pair {
	struct list_head node;
	struct request_queue *q;
	struct elevator_type *type;
};

/*
 * Cache the elevator_type in qe pair list and switch the
 * io scheduler to 'none'
 */
static bool blk_mq_elv_switch_none(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;

	if (!q->elevator)
		return true;

	qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!qe)
		return false;

	INIT_LIST_HEAD(&qe->node);
	qe->q = q;
	qe->type = q->elevator->type;
	list_add(&qe->node, head);

	mutex_lock(&q->sysfs_lock);
	/*
	 * After elevator_switch_mq, the previous elevator_queue will be
	 * released by elevator_release. The reference of the io scheduler
	 * module get by elevator_get will also be put. So we need to get
	 * a reference of the io scheduler module here to prevent it to be
	 * removed.
	 */
	__module_get(qe->type->elevator_owner);
	elevator_switch_mq(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return true;
}

static void blk_mq_elv_switch_back(struct list_head *head,
		struct request_queue *q)
{
	struct blk_mq_qe_pair *qe;
	struct elevator_type *t = NULL;

	list_for_each_entry(qe, head, node)
		if (qe->q == q) {
			t = qe->type;
			break;
		}

	if (!t)
		return;

	list_del(&qe->node);
	kfree(qe);

	mutex_lock(&q->sysfs_lock);
	elevator_switch_mq(q, t);
	mutex_unlock(&q->sysfs_lock);
}

static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
							int nr_hw_queues)
{
	struct request_queue *q;
	LIST_HEAD(head);

	lockdep_assert_held(&set->tag_list_lock);

	if (nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
		return;

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);
	/*
	 * Sync with blk_mq_queue_tag_busy_iter.
	 */
	synchronize_rcu();
	/*
	 * Switch IO scheduler to 'none', cleaning up the data associated
	 * with the previous scheduler. We will switch back once we are done
	 * updating the new sw to hw queue mappings.
	 */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		if (!blk_mq_elv_switch_none(&head, q))
			goto switch_back;

	set->nr_hw_queues = nr_hw_queues;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_realloc_hw_ctxs(set, q);
		blk_mq_queue_reinit(q);
	}

switch_back:
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
	mutex_lock(&set->tag_list_lock);
	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
	mutex_unlock(&set->tag_list_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);

/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
		return true;
	blk_stat_add_callback(q, q->poll_cb);
	return false;
}

static void blk_mq_poll_stats_start(struct request_queue *q)
{
	/*
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
}

static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
{
	struct request_queue *q = cb->data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
		if (cb->stat[bucket].nr_samples)
			q->poll_stat[bucket] = cb->stat[bucket];
	}
}

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	unsigned long ret = 0;
	int bucket;

	/*
	 * If stats collection isn't on, don't sleep but turn it on for
	 * future users
	 */
	if (!blk_poll_stats_enable(q))
		return 0;

	/*
	 * As an optimistic guess, use half of the mean service time
	 * for this type of request. We can (and should) make this smarter.
	 * For instance, if the completion latencies are tight, we can
	 * get closer than just half the mean. This is especially
	 * important on devices where the completion latencies are longer
	 * than ~10 usec. We do use the stats for the relevant IO size
	 * if available which does lead to better estimates.
	 */
	bucket = blk_mq_poll_stats_bkt(rq);
	if (bucket < 0)
		return ret;

	if (q->poll_stat[bucket].nr_samples)
		ret = (q->poll_stat[bucket].mean + 1) / 2;

	return ret;
}

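/*
 * Hybrid polling: sleep for roughly half the expected completion time of
 * @rq before the caller starts busy polling, saving CPU at little cost in
 * latency.
 */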
static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	struct hrtimer_sleeper hs;
	enum hrtimer_mode mode;
	unsigned int nsecs;
	ktime_t kt;

	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
		return false;

	/*
	 * poll_nsec can be:
	 *
	 * -1:	don't ever hybrid sleep
	 *  0:	use half of prev avg
	 * >0:	use this specific value
	 */
	if (q->poll_nsec == -1)
		return false;
	else if (q->poll_nsec > 0)
		nsecs = q->poll_nsec;
	else
		nsecs = blk_mq_poll_nsecs(q, hctx, rq);

	if (!nsecs)
		return false;

	rq->rq_flags |= RQF_MQ_POLL_SLEPT;

	/*
	 * This will be replaced with the stats tracking code, using
	 * 'avg_completion_time / 2' as the pre-sleep target.
	 */
	kt = nsecs;

	mode = HRTIMER_MODE_REL;
	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&hs.timer, kt);

	hrtimer_init_sleeper(&hs, current);
	do {
		if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		hrtimer_start_expires(&hs.timer, mode);
		if (hs.task)
			io_schedule();
		hrtimer_cancel(&hs.timer);
		mode = HRTIMER_MODE_ABS;
	} while (hs.task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&hs.timer);
	return true;
}

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct request_queue *q = hctx->queue;
	long state;

	/*
	 * If we sleep, have the caller restart the poll loop to reset
	 * the state. Like for the other success return cases, the
	 * caller is responsible for checking if the IO completed. If
	 * the IO isn't complete, we'll get called again and will go
	 * straight to the busy poll loop.
	 */
	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
		return true;

	hctx->poll_considered++;

	state = current->state;
	while (!need_resched()) {
		int ret;

		hctx->poll_invoked++;

		ret = q->mq_ops->poll(hctx, rq->tag);
		if (ret > 0) {
			hctx->poll_success++;
			set_current_state(TASK_RUNNING);
			return true;
		}

		if (signal_pending_state(state, current))
			set_current_state(TASK_RUNNING);

		if (current->state == TASK_RUNNING)
			return true;
		if (ret < 0)
			break;
		cpu_relax();
	}

	__set_current_state(TASK_RUNNING);
	return false;
}

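/*
 * Poll for completion of the request identified by @cookie.  The cookie
 * encodes the hardware queue and the (driver or scheduler) tag of the
 * request to poll for.
 */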
static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return false;

	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
	if (!blk_qc_t_is_internal(cookie))
		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
	else {
		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
		/*
		 * With scheduling, if the request has completed, we'll
		 * get a NULL return here, as we clear the sched tag when
		 * that happens. The request still remains valid, like always,
		 * so we should be safe with just the NULL check.
		 */
		if (!rq)
			return false;
	}

	return __blk_mq_poll(hctx, rq);
}

static int __init blk_mq_init(void)
{
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}
subsys_initcall(blk_mq_init);