/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
	RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansion and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

	/* spinlock_t can be zero-sized, e.g. on !CONFIG_SMP builds without
	 * lock debugging, in which case no lock array needs to be allocated.
	 */
	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
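
/* Example (a sketch, not part of this file): automatic resizing is enabled
 * by passing the watermark functions above in struct rhashtable_params;
 * the deferred worker then calls rhashtable_expand()/rhashtable_shrink().
 * "struct test_obj" refers to the example in the rhashtable_init() comment:
 *
 *	struct rhashtable_params params = {
 *		.head_offset	 = offsetof(struct test_obj, node),
 *		.key_offset	 = offsetof(struct test_obj, key),
 *		.key_len	 = sizeof(int),
 *		.hashfn		 = jhash,
 *		.grow_decision	 = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 */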

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node p; p is then the last node of that run.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket in
	 * the new table, lock down that bucket as well, since we might cut
	 * off the end of its chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}

static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table pointer is
	 * visible to all CPUs, so no new additions go into the old table
	 * while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table), unzip the chain so that entries belonging
		 * to other new buckets are removed from it.
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);

		/* Depending on the bucket-to-lock mapping, the buckets in
		 * the lower and upper region may map to the same lock.
		 */
		if (old_bucket_lock1 != old_bucket_lock2) {
			spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
		} else {
			spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
		}

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		if (old_bucket_lock1 != old_bucket_lock2)
			spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
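
/* Example (a sketch): forcing an immediate expansion from process context.
 * Holding ht->mutex serializes against the deferred resize worker and
 * satisfies ASSERT_RHT_MUTEX():
 *
 *	mutex_lock(&ht->mutex);
 *	err = rhashtable_expand(ht);
 *	mutex_unlock(&ht->mutex);
 */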

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
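
/* Example usage (a sketch; "ht" is a hypothetical table initialised with
 * the fixed length key parameters from the rhashtable_init() comment):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(ht, &obj->node);
 */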

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	bool ret = false;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);

		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * rarely incurred.
	 */
	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
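
/* Example (a sketch): a lookup with a caller-supplied compare function,
 * matching on a key embedded in the hypothetical "struct test_obj":
 *
 *	static bool my_compare(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		return obj->key == *(int *)arg;
 *	}
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	obj = rhashtable_lookup_compare(ht, &key, my_compare, &key);
 */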

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
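
/* Example (a sketch): walking all entries of the hypothetical table "ht",
 * restarting whenever a resize rewinds the iterator with -EAGAIN:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto out;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 * out:
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */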

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
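
/* Completing the examples above (a sketch): initialising a table with the
 * fixed length key parameters and tearing it down again:
 *
 *	static struct rhashtable ht;
 *
 *	int err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_destroy(&ht);
 */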

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);