/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
	return ht->p.mutex_is_held(ht->p.parent);
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 __hashfn(const struct rhashtable *ht, const void *key,
		      u32 len, u32 hsize)
{
	u32 h;

	h = ht->p.hashfn(key, len, ht->p.hash_rnd);

	return h & (hsize - 1);
}

/**
 * rhashtable_hashfn - compute hash for key of given length
 * @ht:		hash table to compute for
 * @key:	pointer to key
 * @len:	length of key
 *
 * Computes the hash value using the hash function provided in the 'hashfn'
 * of struct rhashtable_params. The returned value is guaranteed to be
 * smaller than the number of buckets in the hash table.
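 *
 * The bucket count is read from the live table, so this must run under
 * rcu_read_lock() or with the table mutex held. A minimal sketch,
 * assuming a hypothetical table 'my_ht' keyed by a u32 'key':
 *
 *	rcu_read_lock();
 *	bucket = rhashtable_hashfn(my_ht, &key, sizeof(key));
 *	rcu_read_unlock();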
 */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);

static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
	if (unlikely(!ht->p.key_len)) {
		u32 h;

		h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);

		return h & (hsize - 1);
	}

	return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}

/**
 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht:		hash table to compute for
 * @ptr:	pointer to hashed object
 *
 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 * on whether the hash table is set up to work with
 * a fixed length key. The returned value is guaranteed to be smaller than
 * the number of buckets in the hash table.
 */
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct rhash_head *he, u32 hsize)
{
	return obj_hashfn(ht, rht_obj(ht, he), hsize);
}

static struct bucket_table *bucket_table_alloc(size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	kvfree(tbl);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
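 *
 * A minimal sketch of wiring this policy up so that rhashtable_insert()
 * and rhashtable_remove() resize the table automatically (all other
 * parameters elided):
 *
 *	struct rhashtable_params params = {
 *		[...]
 *		.grow_decision = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};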
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl, size_t n)
{
	struct rhash_head *he, *p, *next;
	unsigned int h;

	/* Old bucket empty, no work needed. */
	p = rht_dereference(old_tbl->buckets[n], ht);
	if (!p)
		return;

	/* Advance p one or more times until it reaches the last node
	 * in the leading run of entries which all hash to the same
	 * new-table bucket as the head of the chain.
	 */
	h = head_hashfn(ht, p, new_tbl->size);
	rht_for_each(he, p->next, ht) {
		if (head_hashfn(ht, he, new_tbl->size) != h)
			break;
		p = he;
	}
	RCU_INIT_POINTER(old_tbl->buckets[n], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or NULL if no such node exists.
	 */
	next = NULL;
	if (he) {
		rht_for_each(he, he->next, ht) {
			if (head_hashfn(ht, he, new_tbl->size) == h) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	RCU_INIT_POINTER(p->next, next);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
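 *
 * A minimal calling sketch, assuming a hypothetical 'my_mutex' which
 * serializes all mutations of the table:
 *
 *	mutex_lock(&my_mutex);
 *	err = rhashtable_expand(ht);
 *	mutex_unlock(&my_mutex);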
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int i, h;
	bool complete;

	ASSERT_RHT_MUTEX(ht);

	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
		return 0;

	new_tbl = bucket_table_alloc(old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	ht->shift++;

	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
	 * table, but with multiple buckets "zipped" together into a
	 * single imprecise chain.
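	 *
	 * For example, with an old size of 2 and a new size of 4, old
	 * bucket 0 may hold the chain A0 -> B2 -> C0, where the suffix
	 * denotes the new-table bucket: new buckets 0 and 2 then both
	 * point into that single chain until the unzip passes below
	 * separate it.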
	 */
	for (i = 0; i < new_tbl->size; i++) {
		h = i & (old_tbl->size - 1);
		rht_for_each(he, old_tbl->buckets[h], ht) {
			if (head_hashfn(ht, he, new_tbl->size) == i) {
				RCU_INIT_POINTER(new_tbl->buckets[i], he);
				break;
			}
		}
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	do {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (i = 0; i < old_tbl->size; i++) {
			hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
			if (old_tbl->buckets[i] != NULL)
				complete = false;
		}
	} while (!complete);

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	unsigned int i;

	ASSERT_RHT_MUTEX(ht);

	if (ht->shift <= ht->p.min_shift)
		return 0;

	ntbl = bucket_table_alloc(tbl->size / 2);
	if (ntbl == NULL)
		return -ENOMEM;

	ht->shift--;

	/* Each bucket in the new table is assembled from the two old
	 * buckets whose entries hash to it: old bucket i and old
	 * bucket i + ntbl->size.
	 */
	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		/* Walk to the end of the chain just copied, then append
		 * the chain from old bucket i + ntbl->size, whose
		 * entries hash to the same new bucket.
		 */
		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &rht_dereference(*pprev, ht)->next)
			;
		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, ntbl);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
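 *
 * A minimal sketch, using 'struct test_obj' from the rhashtable_init()
 * example and a hypothetical 'my_mutex' serializing mutations:
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 23;
 *	mutex_lock(&my_mutex);
 *	rhashtable_insert(ht, &obj->node);
 *	mutex_unlock(&my_mutex);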
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	u32 hash;

	ASSERT_RHT_MUTEX(ht);

	hash = head_hashfn(ht, obj, tbl->size);
	RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
	rcu_assign_pointer(tbl->buckets[hash], obj);
	ht->nelems++;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

/**
 * rhashtable_remove_pprev - remove object from hash table given previous element
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @pprev:	pointer to previous element
 *
 * Identical to rhashtable_remove(), but the caller is already aware of the
 * element in front of the element to be deleted. This is particularly useful for
 * deletion when combined with walking or lookup.
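 *
 * A minimal sketch of a walk-and-delete, tracking the previous link the
 * same way rhashtable_remove() below does:
 *
 *	pprev = &tbl->buckets[h];
 *	rht_for_each(he, tbl->buckets[h], ht) {
 *		if (he == obj) {
 *			rhashtable_remove_pprev(ht, he, pprev);
 *			break;
 *		}
 *		pprev = &he->next;
 *	}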
 */
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
			     struct rhash_head __rcu **pprev)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	RCU_INIT_POINTER(*pprev, obj->next);
	ht->nelems--;

	if (ht->p.shrink_decision &&
	    ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
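 *
 * A minimal lookup-and-remove sketch, assuming a hypothetical 'my_mutex'
 * and no remaining RCU readers; with concurrent readers the free must be
 * deferred, e.g. via kfree_rcu():
 *
 *	mutex_lock(&my_mutex);
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj && rhashtable_remove(ht, &obj->node))
 *		kfree(obj);
 *	mutex_unlock(&my_mutex);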
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	u32 h;

	ASSERT_RHT_MUTEX(ht);

	h = head_hashfn(ht, obj, tbl->size);

	pprev = &tbl->buckets[h];
	rht_for_each(he, tbl->buckets[h], ht) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rhashtable_remove_pprev(ht, he, pprev);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for a fixed-key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
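 *
 * A minimal sketch, using 'struct test_obj' from the rhashtable_init()
 * example below:
 *
 *	int key = 23;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		[... use obj; it stays valid until rcu_read_unlock() ...]
 *	rcu_read_unlock();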
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;
	u32 h;

	BUG_ON(!ht->p.key_len);

	h = __hashfn(ht, key, ht->p.key_len, tbl->size);
	rht_for_each_rcu(he, tbl->buckets[h], ht) {
		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
			   ht->p.key_len))
			continue;
		return rht_obj(ht, he);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @hash:	hash value of desired entry
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 *
 * Returns the first entry on which the compare function returned true.
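 *
 * A minimal sketch with a hypothetical compare callback which matches on
 * the 'key' member of the test_obj example from rhashtable_init():
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		return ((struct test_obj *)ptr)->key == *(int *)arg;
 *	}
 *
 *	rcu_read_lock();
 *	hash = rhashtable_hashfn(ht, &key, sizeof(key));
 *	obj = rhashtable_lookup_compare(ht, hash, my_cmp, &key);
 *	rcu_read_unlock();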
 */
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;

	if (unlikely(hash >= tbl->size))
		return NULL;

	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		return rht_obj(ht, he);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * #ifdef CONFIG_PROVE_LOCKING
 *	.mutex_is_held = &my_mutex_is_held,
 * #endif
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * #ifdef CONFIG_PROVE_LOCKING
 *	.mutex_is_held = &my_mutex_is_held,
 * #endif
 * };
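 *
 * A minimal init/destroy sketch using either configuration above (error
 * handling elided):
 *
 *	struct rhashtable ht;
 *
 *	rhashtable_init(&ht, &params);
 *	[... insert, lookup, remove ...]
 *	rhashtable_destroy(&ht);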
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	tbl = bucket_table_alloc(size);
	if (tbl == NULL)
		return -ENOMEM;

	memset(ht, 0, sizeof(*ht));
	ht->shift = ilog2(tbl->size);
	memcpy(&ht->p, params, sizeof(*params));
	RCU_INIT_POINTER(ht->tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU-safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hash table
 * and waiting for a quiescent cycle before releasing the bucket array.
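 *
 * A teardown sketch following that rule, assuming the table was published
 * through a hypothetical RCU-protected pointer 'my_ht_ptr':
 *
 *	rcu_assign_pointer(my_ht_ptr, NULL);
 *	synchronize_rcu();
 *	rhashtable_destroy(&ht);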
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
	bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

#ifdef CONFIG_PROVE_LOCKING
static int test_mutex_is_held(void *parent)
{
	return 1;
}
#endif

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
	unsigned int cnt, rcu_cnt, i, total = 0;
	struct test_obj *obj;
	struct bucket_table *tbl;

	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		rcu_cnt = cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
			rcu_cnt++;

		if (rcu_cnt != cnt)
			pr_warn("Test failed: Chain count mismach %d != %d",
				cnt, rcu_cnt);

		if (!quiet)
			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
		total, ht->nelems, TEST_ENTRIES);

	if (total != ht->nelems || total != TEST_ENTRIES)
		pr_warn("Test failed: Total count mismatch ^^^");
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node);
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table expansion iteration %u...\n", i);
		rhashtable_expand(ht);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table shrinkage iteration %u...\n", i);
		rhashtable_shrink(ht);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	rcu_read_unlock();

	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = jhash,
#ifdef CONFIG_PROVE_LOCKING
		.mutex_is_held = &test_mutex_is_held,
#endif
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */