/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

35
static u32 head_hashfn(struct rhashtable *ht,
36 37
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
38
{
39
	return rht_head_hashfn(ht, tbl, he, ht->p);
40 41
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
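
/*
 * Lock-sizing example (illustrative, not from the original source): on an
 * 8-CPU machine with the default locks_mul of 128, roundup_pow_of_two(8 * 128)
 * yields 1024 candidate locks, which the 0.5-locks-per-bucket cap then
 * limits to tbl->size / 2; a 256-bucket table therefore ends up with 128
 * bucket locks and a locks_mask of 127.
 */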

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	/* Walk to the tail of the old chain; entries are migrated one at
	 * a time from the tail so concurrent readers always see a
	 * consistent chain.
	 */
	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	/* Publish the entry at the head of its new bucket, then unlink
	 * it from the old chain.
	 */
	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that would
 * not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
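
/*
 * Shrink-sizing example (illustrative): with 20 elements the target is
 * roundup_pow_of_two(20 * 3 / 2) = 32 buckets, the smallest power of two
 * at which the table would not immediately cross the 75% threshold and
 * expand again (20/32 = 62.5%, whereas 20/16 would be 125%).
 */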

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
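
/*
 * Threshold example (illustrative): with a 64-bucket table the worker
 * expands once nelems exceeds 48 (75%) and, when automatic_shrinking is
 * set, shrinks once nelems falls below roughly 19 (30%). The gap between
 * the two thresholds provides hysteresis, so a load hovering around one
 * boundary does not thrash between grow and shrink.
 */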

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht,
			     struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	if (err == 0)
		return NULL;
	else if (err == -EAGAIN)
		return tbl;
	else
		return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);
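
/*
 * Walker usage sketch (illustrative only; count_entries() is hypothetical
 * and not part of this file). It shows the init/start/next/stop/exit
 * protocol, including the -EAGAIN rewind a concurrent resize can trigger.
 */
#if 0
static int count_entries(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;
	int err, count = 0;

	err = rhashtable_walk_init(ht, &iter);
	if (err)
		return err;

	/* -EAGAIN from walk_start only means we begin on a fresh table. */
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN) {
				/* Resize raced with us; iterator rewound. */
				count = 0;
				continue;
			}
			break;
		}
		count++;
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);

	return count;
}
#endif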

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
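
/*
 * Sizing example (illustrative): a nelem_hint of 768 yields
 * roundup_pow_of_two(768 * 4 / 3) = 1024 buckets, so the hinted element
 * count lands exactly on the 75% expansion threshold rather than above it.
 */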

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
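	/* Worked numbers (illustrative): for a table of 2^32 buckets,
	 * log2(N)/log2(log2(N)) = 32/5 is roughly 6.4, so even the most
	 * extreme expected chain stays well under the limit of 16.
	 */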
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
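
/*
 * Initialisation sketch (illustrative only; struct test_obj, test_rht and
 * test_params are hypothetical). This wires up the fixed length key layout
 * from Example 1 in the kernel-doc above.
 */
#if 0
struct test_obj {
	int			key;
	struct rhash_head	node;
};

static struct rhashtable test_rht;

static const struct rhashtable_params test_params = {
	.head_offset = offsetof(struct test_obj, node),
	.key_offset = offsetof(struct test_obj, key),
	.key_len = sizeof(int),
	.automatic_shrinking = true,
};

static int __init test_rht_setup(void)
{
	return rhashtable_init(&test_rht, &test_params);
}
#endif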

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements; resources must be
 * released in a manner compatible with that. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
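
/*
 * Teardown sketch (illustrative only; test_free_fn and test_rht_teardown
 * are hypothetical). free_fn receives the pointer returned by rht_obj(),
 * i.e. the enclosing element, so a plain kfree() suffices for kmalloc'd
 * entries.
 */
#if 0
static void test_free_fn(void *ptr, void *arg)
{
	kfree(ptr);
}

static void test_rht_teardown(struct rhashtable *ht)
{
	rhashtable_free_and_destroy(ht, test_free_fn, NULL);
}
#endif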

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);