/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
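
/* Worked example (a sketch; the real numbers depend on the machine): with
 * 8 possible CPUs and the default locks_mul of 128, alloc_bucket_locks()
 * above asks for roundup_pow_of_two(8 * 128) = 1024 locks, which a
 * 64-bucket table then caps at 64 >> 1 = 32 locks, i.e. one lock covering
 * two buckets.
 */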

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl =
		rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static void rhashtable_rehash(struct rhashtable *ht,
			      struct bucket_table *new_tbl)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhashtable_walker *walker;
	unsigned old_hash;

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rhashtable_rehash(ht, new_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
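
/* Usage sketch (illustrative only; "my_ht" is a hypothetical table):
 * manual resizing must serialize against the deferred worker by holding
 * ht->mutex, exactly as ASSERT_RHT_MUTEX() above demands.
 *
 *	mutex_lock(&my_ht.mutex);
 *	err = rhashtable_expand(&my_ht);
 *	mutex_unlock(&my_ht.mutex);
 *
 * rhashtable_shrink() below is invoked the same way; both return 0 on
 * success or -ENOMEM if the new bucket array cannot be allocated.
 */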

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);

	ASSERT_RHT_MUTEX(ht);

	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	new_tbl = bucket_table_alloc(ht, size);
	if (new_tbl == NULL)
		return -ENOMEM;

	rhashtable_rehash(ht, new_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);
unlock:
	mutex_unlock(&ht->mutex);
}
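
/* Illustrative arithmetic (a sketch based on the 75%/30% watermarks used
 * above): a 1024-bucket table is doubled once it holds more than 768
 * entries (3/4 of 1024) and, subject to ht->p.min_size, shrunk once it
 * drops below roughly 307 entries (3/10 of 1024). The gap between the
 * two watermarks provides hysteresis, so a table hovering near one
 * threshold does not bounce between sizes on every insert and remove.
 */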

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err = -EEXIST;

	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
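
/* Usage sketch (illustrative; "my_ht", "my_params" and "struct test_obj"
 * are hypothetical and mirror the rhashtable_init() examples below):
 * callers normally go through the inline fast path in
 * <linux/rhashtable.h>, which falls back to rhashtable_insert_slow()
 * while a rehash is in flight.
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
 *	if (err)
 *		kfree(obj);
 */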

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	mutex_lock(&ht->mutex);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	mutex_unlock(&ht->mutex);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
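
/* Iteration sketch (illustrative; "my_ht", "my_iter" and "obj" are
 * hypothetical): a walk survives concurrent resizes by rewinding, so
 * -EAGAIN from rhashtable_walk_start() or rhashtable_walk_next() only
 * means the iteration restarted, not that it failed.
 *
 *	struct rhashtable_iter my_iter;
 *	void *obj;
 *
 *	err = rhashtable_walk_init(&my_ht, &my_iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&my_iter);
 *
 *	while ((obj = rhashtable_walk_next(&my_iter))) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		... process each object ...
 *	}
 *
 *	rhashtable_walk_stop(&my_iter);
 *	rhashtable_walk_exit(&my_iter);
 */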

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
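
/* Worked example (a sketch): a nelem_hint of 600 targets a load factor
 * of 75%, giving 600 * 4 / 3 = 800 buckets, which rounds up to 1024
 * (or to params->min_size, whichever is larger).
 */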

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
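
/* Initialisation sketch (illustrative; reuses the hypothetical test_obj
 * layout from Configuration Example 1 above):
 *
 *	static struct rhashtable my_ht;
 *	static const struct rhashtable_params my_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *
 *	err = rhashtable_init(&my_ht, &my_params);
 *
 * Leaving .hashfn unset selects jhash (or jhash2 for u32-aligned keys),
 * as done at the end of rhashtable_init() above.
 */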

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);