// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that
 * even if the first allocation attempt fails, the other shared-tag
 * users will already have budget reserved for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
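	 *
	 * e.g. a 256-tag shared map with 3 active queues lets each queue use
	 * up to ceil(256 / 3) = 86 tags, and the max() below guarantees at
	 * least 4 even on very small maps.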
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;
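
	/*
	 * A nonzero shallow_depth (set, for example, by I/O schedulers that
	 * throttle async requests) caps how much of the bitmap depth this
	 * allocation is allowed to use.
	 */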
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}
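	/*
	 * Tags are numbered with the reserved range first: reserved tags are
	 * [0, nr_reserved_tags) and normal tags follow, which is why the
	 * bitmap index is shifted back by tag_offset at found_tag below.
	 */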

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, do a fake wake-up on
		 * the previous queue to compensate for the missed wake-up,
		 * so that other allocations on the previous queue won't be
		 * starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		return iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (!rq)
		return true;
	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
	    !blk_mq_request_started(rq))
		return true;
	return iter_data->fn(rq, iter_data->data, reserved);
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	return __blk_mq_all_tag_iter(tags, fn, priv, 0);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - wait until the completion
 * function of every completed request has run
 * @tagset:	Tag set to drain completed requests from
 *
 * Note: This function has to be run after all IO queues are shut down
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
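
/*
 * Illustrative pairing of the two exports above (a sketch, not code from
 * this file): a driver tearing down a controller would typically fail all
 * in-flight requests and then wait for their completion handlers, using a
 * hypothetical busy_tag_iter_fn:
 *
 *	static bool my_cancel_rq(struct request *rq, void *data, bool reserved)
 *	{
 *		blk_mq_complete_request(rq);	// complete with an error status
 *		return true;			// keep iterating
 *	}
 *
 *	blk_mq_tagset_busy_iter(&set, my_cancel_rq, NULL);
 *	blk_mq_tagset_wait_completed_request(&set);
 */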

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it. __blk_mq_update_nr_hw_queues() uses
	 * synchronize_rcu() to ensure this function left the critical section
	 * below.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
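	/*
	 * A shift of -1 lets sbitmap pick a default bits-per-word
	 * granularity for the given depth; round_robin (BLK_TAG_ALLOC_RR)
	 * allocates tags in strict cyclic order instead of reusing
	 * recently freed bits.
	 */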
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
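		 * (with the usual BLKDEV_MAX_RQ of 128 this caps the depth
		 * at 16 * 128 = 2048 tags).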
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
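
/*
 * The inverse helpers blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag() in <linux/blk-mq.h> split the value back up:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// upper bits
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	// lower bits
 */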