// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

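/*
 * Free any per-hctx scheduler data for all hardware queues of @q, calling
 * the optional @exit callback before each kfree().
 */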
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

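/*
 * Look up (or create) the io_cq of the current task's io_context on this
 * queue and attach it to @rq for the elevator.
 */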
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

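/*
 * Rerun a hardware queue that was previously marked as needing a restart,
 * clearing the restart flag first.
 */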
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

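/*
 * Return the software queue that follows @ctx on this hardware queue,
 * wrapping around at the end; used for round-robin dispatch.
 */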
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

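/*
 * Main dispatch path for a hardware queue: drain leftover requests on the
 * hctx dispatch list first, then pull new work from the elevator or the
 * software queues depending on whether a scheduler is attached and how
 * busy the queue is.
 */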
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

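/*
 * Ask the elevator whether @bio can be merged into an existing request.
 * On a successful back or front merge, also try to merge the request with
 * its neighbour and hand back the now-redundant request in
 * @merged_request so the caller can free it.
 */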
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

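/*
 * Try to merge @bio into an already queued request: delegate to the
 * elevator's ->bio_merge() if a scheduler is attached, otherwise fall
 * back to merging against this CPU's software queue.
 */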
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.bio_merge(hctx, bio);
	}

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
			!list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

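/* Try to merge @rq into an already queued request at insert time. */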
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

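/* Trace hook: emit the block_rq_insert tracepoint for @rq. */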
void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

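/*
 * Decide whether @rq should bypass the scheduler. Requests that are part
 * of a flush sequence go straight onto the hctx dispatch list; returns
 * true in that case so the caller skips the normal insert path.
 */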
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

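/*
 * Insert a single request: route it to the flush machinery, the hctx
 * dispatch list, the elevator, or the software queue as appropriate, and
 * optionally run the hardware queue afterwards.
 */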
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* a flush rq in the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

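/*
 * Insert a list of requests for one software queue: hand them to the
 * elevator if one is attached, otherwise try direct issue or put them on
 * the software queue, then run the hardware queue.
 */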
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * and no scheduler ('none') is attached; this may save us an
		 * extra enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				return;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

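/* Free the scheduler tags and requests of one hardware queue. */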
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

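/*
 * Allocate scheduler tags and requests for one hardware queue, sized by
 * q->nr_requests.
 */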
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

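/* Free the scheduler tags of every hardware queue. */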
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

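/*
 * Attach elevator @e to @q: size q->nr_requests, allocate scheduler tags
 * for each hardware queue, call ->init_sched() and the optional
 * ->init_hctx(), and register the debugfs attributes. With no elevator
 * (@e == NULL), just fall back to the tag set's queue depth.
 */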
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

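/*
 * Detach elevator @e from @q: unregister debugfs attributes, call the
 * optional ->exit_hctx() per hardware queue and ->exit_sched(), then free
 * the scheduler tags.
 */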
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}