/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
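
/* Illustrative note (not in the original source): with CONFIG_PROVE_LOCKING
 * disabled, 8 possible CPUs and the default locks_mul of 128, the code above
 * requests roundup_pow_of_two(8 * 128) = 1024 locks; a 64-bucket table then
 * caps this at 64 >> 1 = 32 locks, so locks_mask ends up as 31.
 */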

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

unlock:
	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

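/* Returns true if the chain at @hash already contains at least
 * ht->elasticity entries, in which case the insert path should trigger a
 * rehash rather than let the chain grow any longer.
 */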
static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned hash)
{
	unsigned elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* More than two rehashes (not resizes) detected. */
	else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
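
/* Usage sketch (illustrative, not part of the original file): iterating over
 * a table with the walker API above. "ht" and "struct test_obj" are assumed
 * to be set up as in Configuration Example 1 of the rhashtable_init()
 * documentation below; an -EAGAIN from the walk functions only means that a
 * resize rewound the iterator.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */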

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
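
/* For instance (illustrative): a nelem_hint of 100 yields
 * roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256 buckets
 * (unless min_size is larger), keeping the initial table below the 75%
 * growth threshold at the hinted number of elements.
 */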

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
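
/* Usage sketch (illustrative, not part of the original file): setting up and
 * exercising a table with fixed-length keys, using the inline *_fast()
 * helpers from <linux/rhashtable.h>. "struct test_obj" is the one from
 * Configuration Example 1 above; "test_params" and "my_obj" are placeholder
 * names, and my_obj is assumed to have been allocated with its ->key set.
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *	struct rhashtable ht;
 *	struct test_obj *my_obj, *found;
 *	int key = 1;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&ht, &my_obj->node, test_params);
 *	if (err)
 *		goto out;
 *
 *	found = rhashtable_lookup_fast(&ht, &key, test_params);
 *
 *	rhashtable_remove_fast(&ht, &my_obj->node, test_params);
 * out:
 *	rhashtable_destroy(&ht);
 */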

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);