/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
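
/* Sizing example for the above (illustrative numbers, not from this file):
 * with 8 possible CPUs and the default locks_mul of 128
 * (BUCKET_LOCKS_PER_CPU), the candidate count is
 * roundup_pow_of_two(8 * 128) = 1024 locks, which the 0.5-locks-per-bucket
 * cap then reduces to tbl->size >> 1, i.e. 32 locks for a 64-bucket table.
 */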

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	/* Walk to the tail entry of the old chain; err stays -ENOENT
	 * if the bucket is already empty.
	 */
	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	/* The caller holds the old bucket lock; nest the new one. */
	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	/* Unlink the entry from the old chain. */
	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Detach walkers; they will rewind from the new table. */
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
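
/* Resize lifecycle, in brief (a summary of the helpers above): the deferred
 * worker allocates a replacement table, rhashtable_rehash_attach() publishes
 * it through old_tbl->future_tbl, rhashtable_rehash_table() then drains every
 * chain via rhashtable_rehash_chain(), swaps ht->tbl, and frees the old
 * table after an RCU grace period via call_rcu().
 */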

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

unlock:
	mutex_unlock(&ht->mutex);

	/* An incomplete rehash (-EAGAIN) is retried on the next run. */
	if (err)
		schedule_work(&ht->run_work);
}
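
/* Worked example of the resize thresholds (illustrative numbers; the exact
 * comparisons live in rht_grow_above_75() and rht_shrink_below_30() in
 * <linux/rhashtable.h>): with a 64-bucket table, growth triggers once
 * nelems exceeds 48 (75% load) and the worker attaches a 128-bucket table,
 * while shrinking triggers once nelems drops below roughly 19 (30% load),
 * with rhashtable_shrink() sizing the replacement at
 * roundup_pow_of_two(nelems * 3 / 2), clamped to ht->p.min_size.
 */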

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err = -EEXIST;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	mutex_lock(&ht->mutex);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	mutex_unlock(&ht->mutex);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
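
/* Walker usage, end to end (an illustrative sketch; "ht" and the element
 * type are assumed from the caller's context). A resize during the walk
 * surfaces as ERR_PTR(-EAGAIN) from rhashtable_walk_next(), which rewinds
 * the iterator rather than ending the walk:
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);	[-EAGAIN here only means "rewound"]
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	[resized; restarts from slot 0]
 *			break;
 *		}
 *		[... process obj ...]
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */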

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
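
/* Worked example: nelem_hint = 100 yields
 * roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256 buckets,
 * so the table starts comfortably below the 75% growth watermark.
 */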

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
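
/* Example usage (an illustrative sketch; reuses struct test_obj and the
 * fixed-length-key params from the kernel-doc above, and assumes the
 * rhashtable_*_fast() helpers from <linux/rhashtable.h>; error handling
 * trimmed):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 1, err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	rcu_read_unlock();
 *
 *	rhashtable_destroy(&ht);
 */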

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for an RCU grace period before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);