/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
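
/*
 * A worked example of the lock sizing above, using illustrative numbers
 * (not measured on any particular machine): with 8 possible CPUs and the
 * default locks_mul of 128, size starts at roundup_pow_of_two(8 * 128) =
 * 1024.  For a 512-bucket table the "0.5 locks per bucket" cap then
 * reduces this to 512 >> 1 = 256 spinlocks, and locks_mask becomes 255.
 */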

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away again.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
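
/*
 * An illustrative shrink calculation (the numbers are made up for the
 * example): with roughly 100 elements left, roundup_pow_of_two(100 * 3 / 2)
 * = 256, so a table currently sized 1024 is replaced by a 256-bucket one,
 * while a table already at 256 buckets or fewer is left untouched.
 */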

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to try allocation in
		 * non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
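
/*
 * A minimal walker usage sketch (illustrative only; "my_ht" and the
 * surrounding context are not part of this file).  A return value of
 * -EAGAIN from rhashtable_walk_start merely means the iterator was
 * rewound, and ERR_PTR(-EAGAIN) from rhashtable_walk_next means a
 * resize was hit and the walk restarts from the beginning:
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	if (rhashtable_walk_init(&my_ht, &iter))
 *		return -ENOMEM;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		[... use the object ...]
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */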

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
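
/*
 * For example (numbers chosen purely for illustration): a nelem_hint of
 * 600 gives 600 * 4 / 3 = 800, which rounds up to a 1024-bucket table,
 * unless params->min_size is larger, in which case min_size wins.
 */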

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
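	/* Rough, illustrative numbers for the above: for N = 2^32 entries,
	 * ln(N) / ln(ln(N)) is about 22.2 / 3.1, i.e. an expected longest
	 * chain on the order of 7, comfortably below the limit of 16.
	 */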
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
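
/*
 * A minimal sketch of how an initialised table is typically used, assuming
 * a "struct test_obj" and "params" as in the examples above; "ht", "obj"
 * and "key" are illustrative, and the *_fast() helpers are the inline
 * wrappers from <linux/rhashtable.h> rather than functions in this file:
 *
 *	static struct rhashtable ht;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	found = rhashtable_lookup_fast(&ht, &key, params);
 *	if (found)
 *		[... found points at the containing test_obj ...]
 *
 *	err = rhashtable_remove_fast(&ht, &obj->node, params);
 */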

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
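
/*
 * A sketch of a typical free_fn callback (illustrative; "struct test_obj"
 * stands in for the user's element type).  The pointer passed to free_fn
 * is the element itself, i.e. what rht_obj() returns, so a kmalloc'ed
 * element can simply be freed:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		kfree(obj);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */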

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);