/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies locking, as taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}

}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);

	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif


static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
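
/* Worked example of the two watermarks above (a sketch, the numbers follow
 * directly from the formulas): with new_size = 64 buckets the table is
 * scheduled to grow once nelems exceeds 64 / 4 * 3 = 48 entries, and a table
 * of new_size = 64 may shrink once nelems drops below 64 * 3 / 10 = 19
 * entries (integer arithmetic), provided the min_shift/max_shift limits
 * allow the resize.
 */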

static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/**
 * Unlink entries on a bucket which hash to a different bucket.
 *
 * Returns true if no more work needs to be performed on the bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same new-table bucket
	 * as the previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}

static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees for the new table to be picked up
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_probe_expand(struct rhashtable *ht)
{
	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl && ht->p.grow_decision &&
	    ht->p.grow_decision(ht, tbl->size))
		schedule_work(&ht->run_work);
}

static void rhashtable_probe_shrink(struct rhashtable *ht)
{
	const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl && ht->p.shrink_decision &&
	    ht->p.shrink_decision(ht, tbl->size))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_probe_expand(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
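
/* Minimal insertion sketch (not taken from a particular caller): it assumes
 * the struct test_obj and rhashtable_params from the rhashtable_init()
 * example below, and a hypothetical, already initialised table 'ht'.
 *
 *	struct test_obj *obj;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *
 *	obj->key = 42;
 *	rhashtable_insert(&ht, &obj->node);
 */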

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_probe_shrink(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
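
/* Minimal removal sketch, again assuming the hypothetical 'ht' table and the
 * struct test_obj from the rhashtable_init() example below. Since lookups are
 * RCU protected, the object may only be freed after a grace period.
 *
 *	if (rhashtable_remove(&ht, &obj->node)) {
 *		synchronize_rcu();
 *		kfree(obj);
 *	}
 */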

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
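
/* Lookup sketch for the fixed-key case, assuming the hypothetical 'ht' table
 * and struct test_obj from the rhashtable_init() example below; the returned
 * object is only safe to dereference while the RCU read lock is held.
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key);
 *	if (obj)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */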

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
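
/* Sketch of a lookup with a caller-supplied compare callback; the callback
 * signature and the call follow rhashtable_lookup_compare() above, while
 * 'my_cmp' and the surrounding code are purely illustrative.
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		return obj->key == *(int *)arg;
 *	}
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	obj = rhashtable_lookup_compare(&ht, &key, my_cmp, &key);
 */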

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
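
/* Sketch of an insert-if-absent pattern built on the helper above, assuming
 * the same hypothetical 'ht' table and struct test_obj; on failure the caller
 * still owns the object and has to release it itself.
 *
 *	obj->key = 42;
 *	if (!rhashtable_lookup_insert(&ht, &obj->node)) {
 *		kfree(obj);
 *		return -EEXIST;
 *	}
 */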

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	INIT_LIST_HEAD(&iter->walker->list);
	iter->walker->resize = false;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
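
/* Iteration sketch using the walker API above. The -EAGAIN handling follows
 * the documented semantics; the 'ht' table, struct test_obj and pr_info()
 * call are illustrative only. A -EAGAIN from rhashtable_walk_start() simply
 * means the iterator was rewound and can be used immediately.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter))) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */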

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
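
/* Initialisation sketch tying the pieces together, using the params from
 * Configuration Example 1 above; 'ht' is the same hypothetical table used in
 * the earlier usage sketches, and hooking up the resize watermarks via
 * grow_decision/shrink_decision is optional.
 *
 *	static struct rhashtable ht;
 *	int err;
 *
 *	params.grow_decision = rht_grow_above_75;
 *	params.shrink_decision = rht_shrink_below_30;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 */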

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);