/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}

}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);

	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif


static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
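
/* Worked example (illustrative): with CONFIG_PROVE_LOCKING disabled on an
 * 8-CPU system and the default locks_mul of BUCKET_LOCKS_PER_CPU (128), the
 * initial request is roundup_pow_of_two(8 * 128) = 1024 locks. For a table
 * of 256 buckets the tbl->size >> 1 cap then reduces this to 128 locks,
 * i.e. one lock per two buckets, matching the scheme described above
 * bucket_lock().
 */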

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
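
/* Worked example (illustrative): with HASH_DEFAULT_SIZE (64) buckets the
 * table is scheduled to grow once nelems exceeds 64 / 4 * 3 = 48 entries
 * (assuming max_shift still allows it); after expanding to 128 buckets it
 * only shrinks again once nelems drops below 128 * 3 / 10 = 38 entries, so
 * the two watermarks do not oscillate around a single table size.
 */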

static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/**
 * Unlink entries on a bucket which hash to a different bucket.
 *
 * Returns true if no more work needs to be performed on the bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node; call that previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}

static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table is picked up,
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
		cond_resched();
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
			cond_resched();
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
		cond_resched();
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (rht_grow_above_75(ht, tbl->size))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl->size))
		rhashtable_shrink(ht);
unlock:
	mutex_unlock(&ht->mutex);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl,
				const struct bucket_table *old_tbl, u32 hash)
{
	bool no_resize_running = tbl == old_tbl;
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
		schedule_work(&ht->run_work);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
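
/* Usage sketch (illustrative; my_ht is an assumed, already initialised table
 * and test_obj reuses the layout from the rhashtable_init() example below):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(&my_ht, &obj->node);
 *
 * Only the embedded rhash_head is linked into the bucket chain, so the
 * object must stay allocated until it has been removed from the table.
 */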

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * number of elements drops below 30% of the table size.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * rarely incurred.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		bool no_resize_running = new_tbl == old_tbl;

		atomic_dec(&ht->nelems);
		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
			schedule_work(&ht->run_work);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
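
/* Usage sketch (illustrative, continuing the insertion example above):
 * removal takes the object pointer that was inserted, not a key, and the
 * object may only be freed after an RCU grace period since concurrent
 * readers may still be traversing it. The rcu_head member is an assumed
 * addition to test_obj:
 *
 *	if (rhashtable_remove(&my_ht, &obj->node))
 *		kfree_rcu(obj, rcu_head);
 */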

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
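
/* Usage sketch (illustrative): a fixed-key lookup, with the returned object
 * only guaranteed to stay valid inside the RCU read-side critical section:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		[... use obj ...]
 *	rcu_read_unlock();
 */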

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
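
/* Usage sketch (illustrative): a lookup that supplies its own match callback
 * instead of the default memcmp() of the fixed-size key (my_cmp and my_ht
 * are assumed names):
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		const struct test_obj *obj = ptr;
 *		const int *key = arg;
 *
 *		return obj->key == *key;
 *	}
 *
 *	obj = rhashtable_lookup_compare(&my_ht, &key, my_cmp, &key);
 *
 * The key argument is only used to compute the bucket hash; matching is
 * left entirely to the compare callback.
 */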

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
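
/* Usage sketch (illustrative): insert-if-absent under the bucket lock, so
 * two concurrent callers cannot both add an entry for the same key:
 *
 *	obj->key = 42;
 *	if (!rhashtable_lookup_insert(&my_ht, &obj->node))
 *		kfree(obj);
 *
 * A false return means an entry with the same key already existed and obj
 * was never linked into the table, so it may be freed immediately.
 */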

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	INIT_LIST_HEAD(&iter->walker->list);
	iter->walker->resize = false;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
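
/* Usage sketch (illustrative): a complete walk that tolerates concurrent
 * resizes by continuing after an -EAGAIN rewind (my_ht and the use made of
 * the returned objects are assumptions):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		[... use obj ...]
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 *
 * rhashtable_walk_start() may itself return -EAGAIN; the iterator is then
 * already rewound, so the walk can simply proceed.
 */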

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
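
/* Usage sketch (illustrative): bringing up a table with the fixed length key
 * parameters from Configuration Example 1 above (my_ht is an assumed name):
 *
 *	static struct rhashtable my_ht;
 *
 *	static int __init my_module_init(void)
 *	{
 *		struct rhashtable_params params = {
 *			.head_offset = offsetof(struct test_obj, node),
 *			.key_offset = offsetof(struct test_obj, key),
 *			.key_len = sizeof(int),
 *			.hashfn = jhash,
 *			.nulls_base = (1U << RHT_BASE_SHIFT),
 *		};
 *
 *		return rhashtable_init(&my_ht, &params);
 *	}
 *
 * The matching teardown is rhashtable_destroy(&my_ht), documented below,
 * once all objects have been removed and no readers remain.
 */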

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);