/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"

struct bucket {
	struct hlist_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_node hash_node;
		struct bpf_htab *htab;
		struct pcpu_freelist_node fnode;
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};
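
/* Illustrative layout (derived from the code below): a regular hash map
 * element is a single allocation,
 *
 *	struct htab_elem	hash_node/rcu/state, hash, ...
 *	key			round_up(key_size, 8) bytes
 *	value			round_up(value_size, 8) bytes
 *
 * while for BPF_MAP_TYPE_PERCPU_HASH the value slot holds one
 * void __percpu * to per-cpu storage; see htab_elem_{set,get}_ptr() and
 * the elem_size computation in htab_map_alloc().
 */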

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	vfree(htab->elems);
}

static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, htab->map.max_entries);
	else
		pcpu_freelist_populate(&htab->freelist, htab->elems,
				       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}
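
/* Informal state walk for the per-cpu extra element:
 *
 *	HTAB_EXTRA_ELEM_FREE -> HTAB_EXTRA_ELEM_USED	in alloc_htab_elem(),
 *		when the table is full but an existing key is being replaced
 *	HTAB_EXTRA_ELEM_USED -> HTAB_EXTRA_ELEM_FREE	in free_htab_elem()
 *
 * Ordinary elements carry HTAB_NOT_AN_EXTRA_ELEM and are returned to the
 * freelist (preallocated maps) or kfree'd via RCU instead.
 */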

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
	bool lru = attr->map_type == BPF_MAP_TYPE_LRU_HASH;
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

	if (lru && !capable(CAP_SYS_ADMIN))
		/* The LRU implementation is much more complicated than other
		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >= 1 element.
		 * While we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
				      GFP_USER | __GFP_NOWARN);

	if (!htab->buckets) {
		htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
		if (!htab->buckets)
			goto free_htab;
	}

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_HEAD(&htab->buckets[i].head);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu && !lru) {
		/* lru itself can remove the least used element, so
		 * there is no need for an extra elem during map_update.
		 */
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	kvfree(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}
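
/* Minimal userspace sketch of creating such a map; bpf_create_map() is a
 * hypothetical thin wrapper around the bpf(2) BPF_MAP_CREATE command, in
 * the style of the samples/bpf helpers:
 *
 *	int fd = bpf_create_map(BPF_MAP_TYPE_HASH,
 *				sizeof(__u32),		key_size
 *				sizeof(long),		value_size
 *				1024,			max_entries
 *				BPF_F_NO_PREALLOC);	map_flags
 *
 * The attributes arrive here in union bpf_attr; with BPF_F_NO_PREALLOC
 * elements are kmalloc'ed on update instead of carved out of htab->elems.
 */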

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}
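
/* Example: n_buckets is forced to a power of two by roundup_pow_of_two()
 * in htab_map_alloc(), so masking with (n_buckets - 1) is equivalent to,
 * but cheaper than, hash % n_buckets; with n_buckets = 1024 the mask is
 * 0x3ff and hash 0x12345678 selects bucket 0x278.
 */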

static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct htab_elem *l;

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_elem_raw(head, hash, key, key_size);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l, *tgt_l;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_for_each_entry_rcu(l, head, hash_node)
		if (l == tgt_l) {
			hlist_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}
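
/* Rough eviction call path for a full LRU map (no allocation happens;
 * an old node is recycled):
 *
 *	htab_lru_map_update_elem()
 *	  prealloc_lru_pop() -> bpf_lru_pop_free()
 *	    htab_lru_map_delete_node()	unlinks the victim from its bucket
 */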

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_elem_raw(head, hash, key, key_size);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is not NULL, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}
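
/* Informal sketch of the onallcpus layout: the source buffer is a flat
 * array of per-cpu values at an 8-byte-rounded stride. With
 * value_size = 12 and four possible CPUs:
 *
 *	value + 0	-> cpu 0	(12 data bytes + 4 bytes padding)
 *	value + 16	-> cpu 1
 *	value + 32	-> cpu 2
 *	value + 48	-> cpu 3
 *
 * bpf_percpu_hash_copy() below produces the same layout in the other
 * direction.
 */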

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
		if (!l_new)
			err = -E2BIG;
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
586 587 588 589 590 591
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
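
/* Resulting update semantics, tabulated for reference:
 *
 *	map_flags	elem exists	elem missing
 *	BPF_ANY		update		insert
 *	BPF_NOEXIST	-EEXIST		insert
 *	BPF_EXIST	update		-ENOENT
 */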

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_head *head = select_bucket(htab, i);
		struct hlist_node *n;
		struct htab_elem *l;

		hlist_for_each_entry_safe(l, n, head, hash_node) {
			hlist_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
	 */
	rcu_barrier();
	if (htab->map.map_flags & BPF_F_NO_PREALLOC)
		delete_all_elements(htab);
	else
		prealloc_destroy(htab);

	free_percpu(htab->extra_elems);
	kvfree(htab->buckets);
	kfree(htab);
}

static const struct bpf_map_ops htab_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __read_mostly = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static const struct bpf_map_ops htab_lru_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_type __read_mostly = {
	.ops = &htab_lru_ops,
	.type = BPF_MAP_TYPE_LRU_HASH,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
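
/* A caller must size its buffer at the rounded stride times the number of
 * possible CPUs; a hypothetical userspace sketch (names illustrative, not
 * a real API):
 *
 *	size_t stride = roundup(value_size, 8);
 *	void *buf = malloc(stride * nr_possible_cpus);
 *	bpf_lookup_elem(map_fd, &key, buf);	one value per possible cpu
 *
 * where nr_possible_cpus must match the kernel's num_possible_cpus(), not
 * merely the count of online CPUs.
 */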

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	int ret;

	rcu_read_lock();
	ret = __htab_percpu_map_update_elem(map, key, value, map_flags, true);
	rcu_read_unlock();

	return ret;
}

static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __read_mostly = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	bpf_register_map_type(&htab_lru_type);
	return 0;
}
late_initcall(register_htab_map);