// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users have
 * already reserved budget for this queue.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		struct request_queue *q = hctx->queue;
		struct blk_mq_tag_set *set = q->tag_set;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			atomic_inc(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
		    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			atomic_inc(&hctx->tags->active_queues);
	}

	return true;
}
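
/*
 * A worked example of the accounting above (a sketch, not part of the
 * original file): the active user counts feed hctx_may_queue(), which
 * divides the shared depth among the active queues. With a 256-tag
 * shared map and 4 active queues, each submitter is limited to roughly
 * 256 / 4 = 64 in-flight tags, so one busy queue cannot starve the
 * others. The count is dropped again via __blk_mq_tag_idle() when a
 * queue goes idle.
 */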

/*
 * Wake up all waiters potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
		atomic_dec(&set->active_queues_shared_sbitmap);
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
		atomic_dec(&tags->active_queues);
	}

	blk_mq_tag_wakeup_all(tags, false);
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
			!hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}
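
/*
 * A hedged aside (not from the original file): data->shallow_depth is
 * set by I/O schedulers that want to throttle a class of requests. A
 * scheduler's limit_depth hook can do something like
 *
 *	data->shallow_depth = my_async_depth;	// hypothetical limit
 *
 * before allocation, which caps how deep into the bitmap this request
 * may reach via __sbitmap_queue_get_shallow().
 */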

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = tags->breserved_tags;
		else
			bt = tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, issue a fake wake up
		 * on the previous queue to compensate for a possibly missed
		 * wakeup, so other allocations on the previous queue won't
		 * be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}
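
/*
 * A minimal caller sketch (assumed, not from this file): allocation is
 * driven through struct blk_mq_alloc_data, roughly the way
 * __blk_mq_alloc_request() sets it up before calling blk_mq_get_tag():
 *
 *	struct blk_mq_alloc_data data = {
 *		.q		= q,
 *		.flags		= BLK_MQ_REQ_NOWAIT,
 *		.cmd_flags	= REQ_OP_READ,
 *	};
 *	data.ctx = blk_mq_get_ctx(q);
 *	data.hctx = blk_mq_map_queue(q, data.cmd_flags, data.ctx);
 *	tag = blk_mq_get_tag(&data);	// BLK_MQ_NO_TAG if none free
 */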

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
	}
}
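
/*
 * Example of the tag numbering above: with nr_tags = 64 and
 * nr_reserved_tags = 2, external tag values 0..1 are reserved tags and
 * index breserved_tags directly, while values 2..63 are regular tags
 * and bitmap_tags is indexed by real_tag = tag - 2. blk_mq_get_tag()
 * applies the reverse mapping by adding tag_offset (nr_reserved_tags)
 * to the bit it allocates from the regular bitmap.
 */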

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq = tags->rqs[bitnr];

	if (!rq || !refcount_inc_not_zero(&rq->ref))
		return NULL;
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;
	bool ret = true;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
 *		where rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as third argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED		(1 << 0)
#define BT_TAG_ITER_STARTED		(1 << 1)
#define BT_TAG_ITER_STATIC_RQS		(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data, reserved);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data,
 *		@reserved) where rq is a pointer to a request. Return true
 *		to continue iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv,
 *		reserved) where rq is a pointer to a request. 'reserved'
 *		indicates whether or not @rq is a reserved request. Return
 *		true to continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
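
/*
 * A sketch of typical driver-side usage (assumed, not from this file):
 * @fn is commonly used to abort everything in flight while a controller
 * is being torn down, along these lines:
 *
 *	static bool my_cancel_request(struct request *rq, void *data,
 *				      bool reserved)
 *	{
 *		blk_mq_complete_request(rq);
 *		return true;	// keep iterating
 *	}
 *
 *	blk_mq_tagset_busy_iter(&my_dev->tagset, my_cancel_request, NULL);
 *
 * my_cancel_request and my_dev are hypothetical names.
 */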

static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed request
 *
 * Note: This function has to be run after all IO queues are shutdown
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
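
/*
 * A hedged teardown sketch (assumed, not from this file): a driver
 * would typically cancel outstanding requests first, then drain their
 * completions, reusing the hypothetical callback above:
 *
 *	blk_mq_tagset_busy_iter(&my_dev->tagset, my_cancel_request, NULL);
 *	blk_mq_tagset_wait_completed_request(&my_dev->tagset);
 */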

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
 *		reserved) where rq is a pointer to a request and hctx points
 *		to the hardware queue associated with the request. 'reserved'
 *		indicates whether or not @rq is a reserved request.
 * @priv:	Will be passed as third argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
				   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	tags->bitmap_tags = &tags->__bitmap_tags;
	tags->breserved_tags = &tags->__breserved_tags;

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&tags->__bitmap_tags);
	return -ENOMEM;
}

int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags)
{
	unsigned int depth = set->queue_depth - set->reserved_tags;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
	int i, node = set->numa_node;

	if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(&set->__breserved_tags, set->reserved_tags,
		     round_robin, node))
		goto free_bitmap_tags;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		tags->bitmap_tags = &set->__bitmap_tags;
		tags->breserved_tags = &set->__breserved_tags;
	}

	return 0;
free_bitmap_tags:
	sbitmap_queue_free(&set->__bitmap_tags);
	return -ENOMEM;
}

void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set)
{
	sbitmap_queue_free(&set->__bitmap_tags);
	sbitmap_queue_free(&set->__breserved_tags);
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, unsigned int flags)
{
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	if (blk_mq_is_sbitmap_shared(flags))
		return tags;

	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
	if (!blk_mq_is_sbitmap_shared(flags)) {
		sbitmap_queue_free(tags->bitmap_tags);
		sbitmap_queue_free(tags->breserved_tags);
	}
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		/* Only sched tags can grow, so clear HCTX_SHARED flag */
		unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags, flags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new, flags);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr, flags);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
{
	sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
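
/*
 * Decoding example: the counterpart helpers declared in
 * <linux/blk-mq.h>, blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag(), split the value back apart:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	// upper bits
 *	u32 tag = blk_mq_unique_tag_to_tag(unique);	// lower bits
 */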