/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

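/*
 * Look up the submitting task's per-queue io_cq (creating one if needed)
 * and attach it to @rq before the request reaches the I/O scheduler,
 * taking a reference on the io_context for the request's lifetime.
 */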
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}

	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return false;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_dec(&q->shared_hctx_restart);
	} else
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	if (blk_mq_hctx_has_pending(hctx)) {
		blk_mq_run_hw_queue(hctx, true);
		return true;
	}

	return false;
}
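
/*
 * Together, the mark/clear helpers above keep q->shared_hctx_restart
 * equal to the number of hardware queues that currently have
 * BLK_MQ_S_SCHED_RESTART set on a shared tag set, which lets
 * blk_mq_sched_restart() return early when no queue needs a restart.
 */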

/* Return true if the hctx needs to be run again. */
static bool blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;
		blk_status_t ret;

		if (e->type->ops.mq.has_work &&
				!e->type->ops.mq.has_work(hctx))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			return true;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	return false;
}

/* Return true if the hardware queue needs to be run again. */
bool blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);
	bool run_queue = false;

	/* RCU or SRCU read lock is needed before checking the quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return false;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false) &&
				has_sched_dispatch)
			run_queue = blk_mq_do_dispatch_sched(hctx);
	} else if (has_sched_dispatch) {
		run_queue = blk_mq_do_dispatch_sched(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}

	if (run_queue && !blk_mq_sched_needs_restart(hctx) &&
			!test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		return true;
	}

	return false;
}
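
/*
 * Dispatch order used above: leftover requests on hctx->dispatch are
 * always drained first; only then are new requests pulled from the
 * scheduler, one at a time, so undispatched requests stay in the
 * elevator where they can still be merged and sorted.
 */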

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
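
/*
 * Typical use from an elevator's ->bio_merge() hook (a sketch; the
 * scheduler lock name is illustrative). If the bio merge cascades into
 * a request-request merge, the now-redundant request is returned via
 * @merged_request and must be freed by the caller once its own lock is
 * dropped:
 *
 *	struct request *free = NULL;
 *	bool merged;
 *
 *	spin_lock(&sched->lock);
 *	merged = blk_mq_sched_try_merge(q, bio, &free);
 *	spin_unlock(&sched->lock);
 *	if (free)
 *		blk_mq_free_request(free);
 *	return merged;
 */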

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

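/*
 * Decide whether @rq should bypass the I/O scheduler. An untagged
 * request is marked RQF_SORTED and left for the elevator; a request
 * that already owns a driver tag (e.g. part of a flush sequence) is
 * put straight on the dispatch list instead.
 */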
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);
	return true;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos:    loop cursor.
 * @skip:   the list element that will not be examined. Iteration starts at
 *          @skip->next.
 * @head:   head of the list to examine. This list must have at least one
 *          element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
	for ((pos) = (skip);						\
	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
			(pos)->member.next, typeof(*pos), member) :	\
	      list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
	     (pos) != (skip); )
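
/*
 * Example (illustrative; mirrors the use in blk_mq_sched_restart()
 * below, where @skip is the queue that owns the just-freed tag and
 * try_to_restart() stands in for the per-queue restart check):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu_rr(q, this_q, &set->tag_list, tag_set_list) {
 *		if (try_to_restart(q))
 *			break;
 *	}
 *	rcu_read_unlock();
 *
 * Iteration starts at @skip->next, wraps past the list head without
 * visiting it, and stops before @skip is reached again.
 */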

/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
	struct blk_mq_tags *const tags = hctx->tags;
	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
	struct request_queue *const queue = hctx->queue, *q;
	struct blk_mq_hw_ctx *hctx2;
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
		/*
		 * If this is 0, then we know that no hardware queues
		 * have RESTART marked. We're done.
		 */
		if (!atomic_read(&queue->shared_hctx_restart))
			return;

		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
			queue_for_each_hw_ctx(q, hctx2, i)
				if (hctx2->tags == tags &&
				    blk_mq_sched_restart_hctx(hctx2))
					goto done;
		}
		j = hctx->queue_num + 1;
		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
			if (j == queue->nr_hw_queues)
				j = 0;
			hctx2 = queue->queue_hw_ctx[j];
			if (hctx2->tags == tags &&
			    blk_mq_sched_restart_hctx(hctx2))
				break;
		}
done:
		rcu_read_unlock();
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}
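
/*
 * Insertion summary (descriptive): flush/fua requests bypass the
 * scheduler via the requeue machinery above; requests that already own
 * a driver tag go straight to hctx->dispatch; everything else is handed
 * to the elevator's ->insert_requests() when one is loaded, or to the
 * software queue otherwise.
 */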

void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}
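
/*
 * Note: hctx->sched_tags is sized from q->nr_requests, independently of
 * the driver's tag depth. Scheduler tags only gate admission into the
 * elevator; a driver tag is still acquired separately at dispatch time.
 */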

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and
	 * 128, since we don't split into sync/async like the old code
	 * did. Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}
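
/*
 * Example: a device with a hardware queue depth of 32 ends up with
 * q->nr_requests = 2 * min(32, BLKDEV_MAX_RQ) = 64 scheduler tags per
 * hardware queue, while BLKDEV_MAX_RQ (128) caps very deep devices at
 * 256.
 */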

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

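/*
 * Called from the request-queue initialization path to register the
 * default I/O scheduler; q->sysfs_lock serializes this against a
 * concurrent elevator switch through sysfs.
 */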
int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}