// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

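/*
 * sched_rq_cmp() orders requests by their hardware queue so that, after
 * list_sort(), requests mapped to the same hctx are adjacent.
 * blk_mq_dispatch_hctx_list() then peels off the leading run of requests
 * sharing one hctx and dispatches that batch.
 */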
static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;
	} while (++count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctx may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * dispatch batching requests from same hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

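/*
 * Return the ctx that follows @ctx in @hctx's ctx map, wrapping around to
 * the first one; used for round-robin dispatch from the software queues.
 */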
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(q))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue.  Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

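/*
 * Requests left over on hctx->dispatch are dispatched first; only then do
 * we pull new requests from the elevator or, without one, from the software
 * queues. Returns -EAGAIN if hctx->dispatch was found non-empty while
 * dispatching, so the caller knows to run the queue again.
 */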
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests, if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched_dispatch)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

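/*
 * Try to merge @bio into an existing request selected by the elevator. On a
 * successful back or front merge, also try to merge that request with its
 * neighbour; if that succeeds, the request that was merged away is returned
 * through @merged_request so the caller can free it. Returns true if the
 * bio was merged.
 */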
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(rq, bio, nr_segs))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(rq, bio, nr_segs))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(rq, bio,
						nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(rq, bio,
						nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio,
				 unsigned int nr_segs)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

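/*
 * If the elevator provides a ->bio_merge() hook, let it handle the merge.
 * Otherwise, when BLK_MQ_F_SHOULD_MERGE is set, fall back to scanning the
 * per-CPU software queue under ctx->lock.
 */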
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(hctx, bio, nr_segs);

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
		spin_unlock(&ctx->lock);
	}

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/*
	 * dispatch flush and passthrough rq directly
	 *
	 * passthrough request has to be added to hctx->dispatch directly.
	 * For some reason, the device may be in a state where it can't
	 * handle FS requests, so STS_RESOURCE is always returned and the
	 * FS request will be added to hctx->dispatch. However a passthrough
	 * request may be required at that time for fixing the problem. If
	 * the passthrough request is added to the scheduler queue, there
	 * isn't any chance to dispatch it given we prioritize requests in
	 * hctx->dispatch.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

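/*
 * Insert a single request. Flush requests go through the flush machinery,
 * flush-sequence and passthrough requests are added to hctx->dispatch
 * directly, and everything else is handed to the elevator or put on the
 * software queue. The hardware queue is then run if @run_queue is set.
 */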
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* flush rq in flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
		/*
		 * Firstly normal IO request is inserted to scheduler queue or
		 * sw queue, meantime we add flush request to dispatch queue(
		 * hctx->dispatch) directly and there is at most one in-flight
		 * flush request for each hw queue, so it doesn't matter to add
		 * flush request to tail or front of the dispatch queue.
		 *
		 * Secondly in case of NCQ, flush request belongs to non-NCQ
		 * command, and queueing it will fail when there is any
		 * in-flight normal IO request(NCQ command). When adding flush
		 * rq to the front of hctx->dispatch, it is easier to introduce
		 * extra time to flush rq's latency because of S_SCHED_RESTART
		 * compared with adding to the tail of dispatch queue, then
		 * chance of flush merge is increased, and less flush requests
		 * will be issued to controller. It is observed that ~10% time
		 * is saved in blktests block/004 on disk attached to AHCI/NCQ
		 * drive when adding flush rq to the front of hctx->dispatch.
		 *
		 * Simply queue flush rq to the front of hctx->dispatch so that
		 * intensive flush workloads can benefit in case of NCQ HW.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

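/*
 * Insert a list of requests coming from plug flush. With an elevator the
 * whole list is handed to ->insert_requests(); without one we may try to
 * issue the requests directly to the driver before falling back to the
 * software queues.
 */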
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds one usage counter to prevent queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * try to issue requests directly if the hw queue isn't
		 * busy in case of 'none' scheduler, and this way may save
		 * us one extra enqueue & dequeue to sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

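/* Free the scheduler requests and the sched tag map of one hardware queue. */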
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

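/*
 * Allocate a scheduler tag map for one hardware queue, sized to
 * q->nr_requests, together with the static requests behind it.
 */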
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to double the smaller of the hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

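/*
 * Tear the elevator down: per-hctx scheduler data and debugfs entries first,
 * then the elevator-wide data, and finally the scheduler tag maps.
 */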
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}