/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

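	/* spinlock_t can be zero-sized, e.g. on uniprocessor builds without
	 * spinlock debugging, in which case no lock array is needed at all.
	 */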
	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
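
/* A bucket's lock is then found by masking the bucket hash with
 * locks_mask.  A minimal sketch of what rht_bucket_lock() in
 * linux/rhashtable.h boils down to:
 *
 *	static inline spinlock_t *bucket_lock(const struct bucket_table *tbl,
 *					      unsigned int hash)
 *	{
 *		return &tbl->locks[hash & tbl->locks_mask];
 *	}
 *
 * Since both sizes are powers of two, several buckets share each lock.
 */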

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

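/* During overlapping resizes, tables form a chain through future_tbl:
 *
 *	ht->tbl -> future_tbl -> future_tbl -> NULL
 *
 * rhashtable_last_table() walks to the end of that chain, which is the
 * table all new insertions must target.
 */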
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
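
/* Once future_tbl is attached, readers must consult both tables.  The
 * fast-path lookup in linux/rhashtable.h does roughly the following:
 *
 *	tbl = rht_dereference_rcu(ht->tbl, ht);
 * restart:
 *	(search the key's bucket chain in tbl)
 *	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 *	if (tbl)
 *		goto restart;
 *
 * so entries that were already moved to the new table stay reachable
 * for the whole rehash.
 */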

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
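
/* Worked example, assuming the usual 75%/30% helpers from
 * linux/rhashtable.h: with tbl->size == 64, rht_grow_above_75() fires
 * once nelems exceeds 48 (64 * 3/4), and rht_shrink_below_30() fires
 * once nelems drops below 19 (64 * 3/10), provided automatic_shrinking
 * was requested in the parameters.
 */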

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to retry the allocation in
		 * non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
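
/* A typical walk, as a sketch; my_ht is hypothetical and error handling
 * is abbreviated:
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	if (rhashtable_walk_init(&my_ht, &iter))
 *		return;
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter))) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(iterator was rewound)
 *			break;
 *		}
 *		(use obj)
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */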

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
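	/* E.g. a nelem_hint of 100 sizes the table for 100 * 4/3 = 133
	 * entries and rounds up to 256 buckets, unless params->min_size
	 * is larger.
	 */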
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
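
/* End-to-end usage, as a sketch; struct test_obj matches Example 1 above,
 * my_obj is a hypothetical kmalloc'ed instance, and the fast-path helpers
 * are the ones declared in linux/rhashtable.h:
 *
 *	static const struct rhashtable_params my_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *
 *	struct rhashtable ht;
 *	struct test_obj *found;
 *	int key = 42;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &my_params);
 *	if (!err)
 *		err = rhashtable_insert_fast(&ht, &my_obj->node, my_params);
 *	found = rhashtable_lookup_fast(&ht, &key, my_params);
 *	err = rhashtable_remove_fast(&ht, &my_obj->node, my_params);
 *	rhashtable_destroy(&ht);
 */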

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
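
/* Example free_fn, as a sketch assuming the objects were kmalloc'ed:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */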

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);