/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This allows the locking to be simplified, as locking the bucket in both
 * tables during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
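/* Worked example (illustrative): when an 8 bucket table is expanded to 16
 * buckets, the new table is given at most 16/2 == 8 bucket locks. With a
 * locks_mask of 7, old bucket 3 feeds new buckets 3 and 11, and since
 * 3 & 7 == 11 & 7 == 3 both destination buckets fall under the same lock.
 */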

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(struct rhashtable *ht,
			  const struct bucket_table *tbl, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, tbl->hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    tbl->hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const struct bucket_table *tbl,
		      const void *key, u32 len)
{
	return rht_bucket_index(tbl, ht->p.hashfn(key, len, tbl->hash_rnd) >>
				     HASH_RESERVED_SPACE);
}

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, tbl, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
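/* Worked example of the watermarks (illustrative): for new_size == 64 the
 * table is grown once nelems exceeds 64 / 4 * 3 == 48 entries (75% load)
 * and shrunk once nelems drops below 64 * 3 / 10 == 19 entries (30% load),
 * provided the min/max shift limits permit it.
 */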

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *new_tbl = rht_dereference(ht->future_tbl, ht);
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, RHT_LOCK_NESTED);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	spin_unlock_bh(old_bucket_lock);
}

static void rhashtable_rehash(struct rhashtable *ht,
			      struct bucket_table *new_tbl)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned old_hash;

	get_random_bytes(&new_tbl->hash_rnd, sizeof(new_tbl->hash_rnd));

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees for the new table to be picked up
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	synchronize_rcu();

	bucket_table_free(old_tbl);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	new_tbl->hash_rnd = old_tbl->hash_rnd;

	atomic_inc(&ht->shift);

	rhashtable_rehash(ht, new_tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	new_tbl->hash_rnd = tbl->hash_rnd;

	atomic_dec(&ht->shift);

	rhashtable_rehash(ht, new_tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (rht_grow_above_75(ht, tbl->size))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl->size))
		rhashtable_shrink(ht);
unlock:
	mutex_unlock(&ht->mutex);
}

static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				bool (*compare)(void *, void *), void *arg)
{
	struct bucket_table *tbl, *old_tbl;
	struct rhash_head *head;
	bool no_resize_running;
	unsigned hash;
	bool success = true;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, old_tbl, obj);

	spin_lock_bh(bucket_lock(old_tbl, hash));

	/* Because we have already taken the bucket lock in old_tbl,
	 * if we find that future_tbl is not yet visible then that
	 * guarantees all other insertions of the same entry will
	 * also grab the bucket lock in old_tbl because until the
	 * rehash completes ht->tbl won't be changed.
	 */
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	if (tbl != old_tbl) {
		hash = head_hashfn(ht, tbl, obj);
		spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED);
	}

	if (compare &&
	    rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	no_resize_running = tbl == old_tbl;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
		schedule_work(&ht->run_work);

exit:
	if (tbl != old_tbl) {
		hash = head_hashfn(ht, tbl, obj);
		spin_unlock(bucket_lock(tbl, hash));
	}

	hash = head_hashfn(ht, old_tbl, obj);
	spin_unlock_bh(bucket_lock(old_tbl, hash));

	rcu_read_unlock();

	return success;
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	__rhashtable_insert(ht, obj, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
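/* Usage sketch (illustrative only; 'struct test_obj' and 'my_ht' are assumed
 * caller-side definitions in the style of the rhashtable_init() examples
 * further below):
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *	static struct rhashtable my_ht;
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(&my_ht, &obj->node);
 */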

static bool __rhashtable_remove(struct rhashtable *ht,
				struct bucket_table *tbl,
				struct rhash_head *obj)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned hash;
	bool ret = false;

	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		ret = true;
		break;
	}

	spin_unlock_bh(lock);

	return ret;
}

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	bool ret;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	ret = __rhashtable_remove(ht, old_tbl, obj);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * old_tbl if it exists.
	 */
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	if (!ret && old_tbl != tbl)
		ret = __rhashtable_remove(ht, tbl, obj);

	if (ret) {
		bool no_resize_running = tbl == old_tbl;

		atomic_dec(&ht->nelems);
		if (no_resize_running && rht_shrink_below_30(ht, tbl->size))
			schedule_work(&ht->run_work);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
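/* Usage sketch (illustrative; reuses the assumed 'struct test_obj' and
 * 'my_ht' definitions from the insertion example above). The caller holds
 * rcu_read_lock() so the returned object cannot be freed while it is used:
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */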

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = key_hashfn(ht, tbl, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, hash) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	old_tbl = tbl;
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	if (unlikely(tbl != old_tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
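/* Usage sketch with a custom compare function (illustrative; 'struct sess'
 * and its fields are assumptions). The key is still hashed via hashfn, while
 * the compare callback matches on an additional criterion:
 *
 *	struct sess_cmp_arg {
 *		u32	id;
 *		int	family;
 *	};
 *
 *	static bool sess_cmp(void *ptr, void *arg)
 *	{
 *		struct sess *s = ptr;
 *		struct sess_cmp_arg *x = arg;
 *
 *		return s->id == x->id && s->family == x->family;
 *	}
 *
 *	struct sess_cmp_arg arg = { .id = id, .family = AF_INET };
 *
 *	s = rhashtable_lookup_compare(&sess_ht, &id, sess_cmp, &arg);
 */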

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
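/* Usage sketch (illustrative, based on the insertion example above): insert
 * an object only if its key is not yet present, enforcing unique keys even
 * under concurrent insertions:
 *
 *	obj->key = 42;
 *	if (!rhashtable_lookup_insert(&my_ht, &obj->node)) {
 *		kfree(obj);
 *		return -EEXIST;
 *	}
 */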

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	BUG_ON(!ht->p.key_len);

	return __rhashtable_insert(ht, obj, compare, arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	INIT_LIST_HEAD(&iter->walker->list);
	iter->walker->resize = false;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
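/* Usage sketch of the walker API (illustrative; 'struct test_obj' and 'my_ht'
 * are the assumed definitions from the examples above, return value checks
 * trimmed for brevity):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	(-EAGAIN: resize seen, iterator rewound)
 *		pr_info("key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */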

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}
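/* Example (illustrative): with nelem_hint == 100 and min_shift == 4 this
 * yields max(roundup_pow_of_two(100 * 4 / 3), 1UL << 4) == 256 buckets; the
 * hint is scaled by 4/3 so the initial table starts below the 75% grow
 * watermark.
 */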

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
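/* Usage sketch (illustrative): with the parameters from Configuration
 * Example 1 above declared as 'params':
 *
 *	struct rhashtable my_ht;
 *	int err;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 */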

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);