/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return (hash >> HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl,
		      const void *key)
{
	return rht_bucket_index(tbl, ht->p.hashfn(key, ht->p.key_len,
						  tbl->hash_rnd));
}

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	const char *ptr = rht_obj(ht, he);

	return likely(ht->p.key_len) ?
	       key_hashfn(ht, tbl, ptr + ht->p.key_offset) :
	       rht_bucket_index(tbl, ht->p.obj_hashfn(ptr, tbl->hash_rnd));
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static bool rht_grow_above_75(const struct rhashtable *ht,
			      const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_shift || tbl->size < (1 << ht->p.max_shift)) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static bool rht_shrink_below_30(const struct rhashtable *ht,
				const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > (1 << ht->p.min_shift) &&
	       tbl->size > ht->p.min_size;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl =
		rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static void rhashtable_rehash(struct rhashtable *ht,
			      struct bucket_table *new_tbl)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhashtable_walker *walker;
	unsigned old_hash;

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rhashtable_rehash(ht, new_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rhashtable_rehash(ht, new_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);
unlock:
	mutex_unlock(&ht->mutex);
}

static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				bool (*compare)(void *, void *), void *arg)
{
	struct bucket_table *tbl, *old_tbl;
	struct rhash_head *head;
	bool no_resize_running;
	unsigned hash;
	spinlock_t *old_lock;
	bool success = true;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, old_tbl, obj);
	old_lock = bucket_lock(old_tbl, hash);

	spin_lock_bh(old_lock);

	/* Because we have already taken the bucket lock in old_tbl,
	 * if we find that future_tbl is not yet visible then that
	 * guarantees all other insertions of the same entry will
	 * also grab the bucket lock in old_tbl because until the
	 * rehash completes ht->tbl won't be changed.
	 */
	tbl = rht_dereference_rcu(old_tbl->future_tbl, ht) ?: old_tbl;
	if (tbl != old_tbl) {
		hash = head_hashfn(ht, tbl, obj);
		spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
	}

	if (compare &&
	    rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	no_resize_running = tbl == old_tbl;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (no_resize_running && rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

exit:
	if (tbl != old_tbl)
		spin_unlock(bucket_lock(tbl, hash));

	spin_unlock_bh(old_lock);

	rcu_read_unlock();

	return success;
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	__rhashtable_insert(ht, obj, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

static bool __rhashtable_remove(struct rhashtable *ht,
				struct bucket_table *tbl,
				struct rhash_head *obj)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned hash;
	bool ret = false;

	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		ret = true;
		break;
	}

	spin_unlock_bh(lock);

	return ret;
}

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	bool ret;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while (!(ret = __rhashtable_remove(ht, tbl, obj)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (ret) {
		atomic_dec(&ht->nelems);
		if (rht_shrink_below_30(ht, tbl))
			schedule_work(&ht->run_work);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = key_hashfn(ht, tbl, key);
	rht_for_each_rcu(he, tbl, hash) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
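
/* Usage sketch (illustrative only; the struct and callback names below are
 * hypothetical, not part of this file): rhashtable_lookup_compare() pairs
 * with a caller-supplied compare callback when a plain memcmp() on the key
 * is not sufficient, e.g.:
 *
 *	static bool my_obj_cmp(void *ptr, void *arg)
 *	{
 *		const struct my_obj *obj = ptr;
 *		const u32 *id = arg;
 *
 *		return obj->id == *id;
 *	}
 *
 *	obj = rhashtable_lookup_compare(ht, &id, my_obj_cmp, &id);
 *
 * The key passed in is still hashed with the hashfn configured at
 * rhashtable_init() time, so it must match the configured key layout.
 */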

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	BUG_ON(!ht->p.key_len);

	return __rhashtable_insert(ht, obj, compare, arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	mutex_lock(&ht->mutex);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	mutex_unlock(&ht->mutex);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
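
/* Walker usage sketch (illustrative only; the object type and error handling
 * are abbreviated). rhashtable_walk_next() may return ERR_PTR(-EAGAIN) when
 * a resize interrupts the walk; the iterator rewinds and the loop can simply
 * continue:
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	if (rhashtable_walk_init(ht, &iter))
 *		return -ENOMEM;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		[... use obj; do not sleep until rhashtable_walk_stop() ...]
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */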

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   max(1UL << params->min_shift,
		       (unsigned long)params->min_size));
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));
	params->min_size = max(params->min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
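
/* End-to-end usage sketch based on the fixed-length key example above
 * (illustrative only; error handling and object lifetime management are
 * abbreviated):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 1;
 *
 *	if (rhashtable_init(&ht, &params))
 *		return -EINVAL;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	rhashtable_insert(&ht, &obj->node);
 *
 *	obj = rhashtable_lookup(&ht, &key);
 *	if (obj)
 *		rhashtable_remove(&ht, &obj->node);
 *
 *	rhashtable_destroy(&ht);
 */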

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);