// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}
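
/*
 * Editor's note (illustration, not part of the original file): with
 * map->word == 0b1111 and map->cleared == 0b0101, the xchg() above takes
 * ownership of 0b0101, the andnot leaves map->word == 0b1010, and bits 0
 * and 2 become allocatable again on the next __sbitmap_get_word() pass.
 */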

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
					!sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
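
/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * the plain sbitmap allocation cycle using only APIs defined here and in
 * <linux/sbitmap.h> (sbitmap_init_node, sbitmap_get, sbitmap_clear_bit,
 * sbitmap_free). The depth, GFP_KERNEL and NUMA_NO_NODE choices are
 * assumptions made for the example.
 */
#if 0	/* example only, not compiled */
static int sbitmap_example(void)
{
	struct sbitmap sb;
	int tag, ret;

	/* 128 bits; shift < 0 lets sbitmap_calculate_shift() pick the word
	 * size; no round-robin allocation; with per-cpu alloc hints. */
	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
				false, true);
	if (ret)
		return ret;

	tag = sbitmap_get(&sb);			/* -1 if the map is full */
	if (tag >= 0)
		sbitmap_clear_bit(&sb, tag);	/* release the bit */

	sbitmap_free(&sb);
	return 0;
}
#endif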

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
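
/*
 * Editor's worked example (illustration, not part of the original file):
 * for depth = 256, shift = 6 (64 bits per word) and min_shallow_depth = 4,
 * shallow_depth = min(64, 4) = 4, the limited depth becomes
 * (256 >> 6) * 4 + min(256 & 63, 4) = 16, and wake_batch =
 * clamp(16 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH) = clamp(2, 1, 8) = 2.
 * With min_shallow_depth left at UINT_MAX the same bitmap gives
 * clamp(256 / 8, 1, 8) = 8.
 */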

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
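
/*
 * Editor's illustration (not part of the original file): a minimal
 * sbitmap_queue round trip using only APIs from this file and
 * <linux/sbitmap.h> (sbitmap_queue_init_node, __sbitmap_queue_get,
 * sbitmap_queue_clear, sbitmap_queue_free). Depth, flags and node values
 * are assumptions made for the example.
 */
#if 0	/* example only, not compiled */
static int sbitmap_queue_example(void)
{
	struct sbitmap_queue sbq;
	int tag, ret;

	ret = sbitmap_queue_init_node(&sbq, 64, -1, false,
				      GFP_KERNEL, NUMA_NO_NODE);
	if (ret)
		return ret;

	tag = __sbitmap_queue_get(&sbq);	/* -1 when no bit is free */
	if (tag >= 0)
		/* releasing the bit also wakes waiters in batches */
		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());

	sbitmap_queue_free(&sbq);
	return 0;
}
#endif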

static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int wake_batch)
{
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;

	wake_batch = clamp_val((sbq->sb.depth + users - 1) /
			users, 4, SBQ_WAKE_BATCH);
	__sbitmap_queue_update_wake_batch(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map->depth - 1)) - 1)
			continue;

		nr = find_first_zero_bit(&map->word, map->depth);
		if (nr + nr_tags <= map->depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			int map_tags = min_t(int, nr_tags, map->depth);
			unsigned long val, ret;

			get_mask = ((1UL << map_tags) - 1) << nr;
			do {
				val = READ_ONCE(map->word);
				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
			} while (ret != val);
			get_mask = (get_mask & ~ret) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + map_tags - 1);
				return get_mask;
			}
		}
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}
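
/*
 * Editor's worked example (illustration, not part of the original file):
 * with nr = 2 and map_tags = 3 the candidate mask is 0b11100; if the
 * cmpxchg loop observes bit 3 already set in ret, the returned mask is
 * (0b11100 & ~0b01000) >> 2 = 0b101, i.e. the caller owns tags *offset
 * and *offset + 2 but not *offset + 1.
 */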

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
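
/*
 * Editor's note (illustration, not part of the original file): callers are
 * expected to declare their smallest shallow depth up front, e.g. call
 * sbitmap_queue_min_shallow_depth(&sbq, 8) before any
 * __sbitmap_queue_get_shallow(&sbq, 8), so that sbq_calc_wake_batch() sizes
 * wake_batch for the restricted depth and wakeups are not lost.
 */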

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);