/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"

struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

struct bpf_htab {
	struct bpf_map map;
	struct bucket *buckets;
	void *elems;
	union {
		struct pcpu_freelist freelist;
		struct bpf_lru lru;
	};
	void __percpu *extra_elems;
	atomic_t count;	/* number of elements in this hashtable */
	u32 n_buckets;	/* number of hash buckets */
	u32 elem_size;	/* size of each element in bytes */
};

enum extra_elem_state {
	HTAB_NOT_AN_EXTRA_ELEM = 0,
	HTAB_EXTRA_ELEM_FREE,
	HTAB_EXTRA_ELEM_USED
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
	union {
		struct hlist_nulls_node hash_node;
		struct {
			void *padding;
			union {
				struct bpf_htab *htab;
				struct pcpu_freelist_node fnode;
			};
		};
	};
	union {
		struct rcu_head rcu;
		enum extra_elem_state state;
		struct bpf_lru_node lru_node;
	};
	u32 hash;
	char key[0] __aligned(8);
};

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
	return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

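/* For per-cpu maps, the slot right after the key does not hold the value
 * itself but a per-CPU pointer to it; the two helpers below store and
 * fetch that pointer.
 */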
static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
				     void __percpu *pptr)
{
	*(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
	return *(void __percpu **)(l->key + key_size);
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
}

static void htab_free_elems(struct bpf_htab *htab)
{
	int i;

	if (!htab_is_percpu(htab))
		goto free_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		void __percpu *pptr;

		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
					 htab->map.key_size);
		free_percpu(pptr);
	}
free_elems:
	bpf_map_area_free(htab->elems);
}

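/* Grab a free node from the LRU and stamp the key into it.  Depending on
 * the LRU's state, bpf_lru_pop_free() may first have to evict an older
 * element by calling back into htab_lru_map_delete_node().
 */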
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
					  u32 hash)
{
	struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
	struct htab_elem *l;

	if (node) {
		l = container_of(node, struct htab_elem, lru_node);
		memcpy(l->key, key, htab->map.key_size);
		return l;
	}

	return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
	int err = -ENOMEM, i;

	htab->elems = bpf_map_area_alloc(htab->elem_size *
					 htab->map.max_entries);
	if (!htab->elems)
		return -ENOMEM;

	if (!htab_is_percpu(htab))
		goto skip_percpu_elems;

	for (i = 0; i < htab->map.max_entries; i++) {
		u32 size = round_up(htab->map.value_size, 8);
		void __percpu *pptr;

		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
		if (!pptr)
			goto free_elems;
		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
				  pptr);
	}

skip_percpu_elems:
	if (htab_is_lru(htab))
		err = bpf_lru_init(&htab->lru,
				   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
				   offsetof(struct htab_elem, hash) -
				   offsetof(struct htab_elem, lru_node),
				   htab_lru_map_delete_node,
				   htab);
	else
		err = pcpu_freelist_init(&htab->freelist);

	if (err)
		goto free_elems;

	if (htab_is_lru(htab))
		bpf_lru_populate(&htab->lru, htab->elems,
				 offsetof(struct htab_elem, lru_node),
				 htab->elem_size, htab->map.max_entries);
	else
		pcpu_freelist_populate(&htab->freelist,
				       htab->elems + offsetof(struct htab_elem, fnode),
				       htab->elem_size, htab->map.max_entries);

	return 0;

free_elems:
	htab_free_elems(htab);
	return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
	htab_free_elems(htab);

	if (htab_is_lru(htab))
		bpf_lru_destroy(&htab->lru);
	else
		pcpu_freelist_destroy(&htab->freelist);
}

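/* One spare element per possible CPU.  It is only handed out when an
 * update replaces an existing key while the table is already full
 * (see alloc_htab_elem()).
 */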
static int alloc_extra_elems(struct bpf_htab *htab)
{
	void __percpu *pptr;
	int cpu;

	pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN);
	if (!pptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state =
			HTAB_EXTRA_ELEM_FREE;
	}
	htab->extra_elems = pptr;
	return 0;
}

/* Called from syscall */
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
	/* percpu_lru means each cpu has its own LRU list.
	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
	 * the map's value itself is percpu.  percpu_lru has
	 * nothing to do with the map's value.
	 */
	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
	struct bpf_htab *htab;
	int err, i;
	u64 cost;

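	/* The freelist node and the htab back-pointer must overlay
	 * hash_node.pprev rather than hash_node.next, so that an element
	 * being recycled still looks like a sane list node to concurrent
	 * RCU lookups walking the nulls list.
	 */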
	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
		     offsetof(struct htab_elem, hash_node.pprev));
	BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
		     offsetof(struct htab_elem, hash_node.pprev));

	if (lru && !capable(CAP_SYS_ADMIN))
		/* LRU implementation is much more complicated than other
		 * maps.  Hence, limit to CAP_SYS_ADMIN for now.
		 */
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
		/* reserved bits should not be used */
		return ERR_PTR(-EINVAL);

	if (!lru && percpu_lru)
		return ERR_PTR(-EINVAL);

	if (lru && !prealloc)
		return ERR_PTR(-ENOTSUPP);

	htab = kzalloc(sizeof(*htab), GFP_USER);
	if (!htab)
		return ERR_PTR(-ENOMEM);

	/* mandatory map attributes */
	htab->map.map_type = attr->map_type;
	htab->map.key_size = attr->key_size;
	htab->map.value_size = attr->value_size;
	htab->map.max_entries = attr->max_entries;
	htab->map.map_flags = attr->map_flags;

	/* check sanity of attributes.
	 * value_size == 0 may be allowed in the future to use map as a set
	 */
	err = -EINVAL;
	if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
	    htab->map.value_size == 0)
		goto free_htab;

	if (percpu_lru) {
		/* ensure each CPU's lru list has >= 1 element.
		 * while we are at it, make each lru list have the same
		 * number of elements.
		 */
		htab->map.max_entries = roundup(attr->max_entries,
						num_possible_cpus());
		if (htab->map.max_entries < attr->max_entries)
			htab->map.max_entries = rounddown(attr->max_entries,
							  num_possible_cpus());
	}

	/* hash table size must be power of 2 */
	htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

	err = -E2BIG;
	if (htab->map.key_size > MAX_BPF_STACK)
		/* eBPF programs initialize keys on stack, so they cannot be
		 * larger than max stack size
		 */
		goto free_htab;

	if (htab->map.value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct htab_elem))
		/* if value_size is bigger, the user space won't be able to
		 * access the elements via bpf syscall. This check also makes
		 * sure that the elem_size doesn't overflow and it's
		 * kmalloc-able later in htab_map_update_elem()
		 */
		goto free_htab;

	if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
		/* make sure the size for pcpu_alloc() is reasonable */
		goto free_htab;

	htab->elem_size = sizeof(struct htab_elem) +
			  round_up(htab->map.key_size, 8);
	if (percpu)
		htab->elem_size += sizeof(void *);
	else
		htab->elem_size += round_up(htab->map.value_size, 8);

	/* prevent zero size kmalloc and check for u32 overflow */
	if (htab->n_buckets == 0 ||
	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
		goto free_htab;

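	/* Charge the bucket array and all pre-allocatable elements (plus the
	 * per-CPU value areas for per-cpu maps, or the per-CPU extra_elems
	 * otherwise) against the memlock limit.
	 */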
	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;

	if (percpu)
		cost += (u64) round_up(htab->map.value_size, 8) *
			num_possible_cpus() * htab->map.max_entries;
	else
		cost += (u64) htab->elem_size * num_possible_cpus();

	if (cost >= U32_MAX - PAGE_SIZE)
		/* make sure page count doesn't overflow */
		goto free_htab;

	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(htab->map.pages);
	if (err)
		goto free_htab;

	err = -ENOMEM;
	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
					   sizeof(struct bucket));
	if (!htab->buckets)
		goto free_htab;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
		raw_spin_lock_init(&htab->buckets[i].lock);
	}

	if (!percpu && !lru) {
		/* lru itself can remove the least used element, so
		 * there is no need for an extra elem during map_update.
		 */
		err = alloc_extra_elems(htab);
		if (err)
			goto free_buckets;
	}

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_extra_elems;
	}

	return &htab->map;

free_extra_elems:
	free_percpu(htab->extra_elems);
free_buckets:
	bpf_map_area_free(htab->buckets);
free_htab:
	kfree(htab);
	return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
	return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
					 void *key, u32 key_size)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event when elements moved from one bucket into another
 * while link list is being walked
 */
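/* The nulls value that terminates each list encodes the bucket index (see
 * INIT_HLIST_NULLS_HEAD() in htab_map_alloc()), which is how a walker
 * detects that it drifted into a different bucket and must retry.
 */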
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
					       u32 hash, void *key,
					       u32 key_size, u32 n_buckets)
{
	struct hlist_nulls_node *n;
	struct htab_elem *l;

again:
	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l->hash == hash && !memcmp(&l->key, key, key_size))
			return l;

	if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
		goto again;

	return NULL;
}

/* Called from syscall or from eBPF program */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l;
	u32 hash, key_size;

	/* Must be called with rcu_read_lock. */
	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return l->key + round_up(map->key_size, 8);

	return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return l->key + round_up(map->key_size, 8);
	}

	return NULL;
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
	struct bpf_htab *htab = (struct bpf_htab *)arg;
	struct htab_elem *l = NULL, *tgt_l;
	struct hlist_nulls_head *head;
	struct hlist_nulls_node *n;
	unsigned long flags;
	struct bucket *b;

	tgt_l = container_of(node, struct htab_elem, lru_node);
	b = __select_bucket(htab, tgt_l->hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
		if (l == tgt_l) {
			hlist_nulls_del_rcu(&l->hash_node);
			break;
		}

	raw_spin_unlock_irqrestore(&b->lock, flags);

	return l == tgt_l;
}

/* Called from syscall */
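/* Iteration order is bucket by bucket, then list order within a bucket.
 * If the given key is not found, iteration restarts from bucket 0.
 */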
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct htab_elem *l, *next_l;
	u32 hash, key_size;
	int i;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	head = select_bucket(htab, hash);

	/* lookup the key */
	l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

	if (!l) {
		i = 0;
		goto find_first_elem;
	}

	/* key was found, get next key in the same bucket */
	next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
				  struct htab_elem, hash_node);

	if (next_l) {
		/* if next elem in this hash list is non-zero, just return it */
		memcpy(next_key, next_l->key, key_size);
		return 0;
	}

	/* no more elements in this hash list, go to the next bucket */
	i = hash & (htab->n_buckets - 1);
	i++;

find_first_elem:
	/* iterate over buckets */
	for (; i < htab->n_buckets; i++) {
		head = select_bucket(htab, i);

		/* pick first element in the bucket */
		next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
					  struct htab_elem, hash_node);
		if (next_l) {
			/* if it's not empty, just return it */
			memcpy(next_key, next_l->key, key_size);
			return 0;
		}
	}

	/* iterated over all buckets and all elements */
	return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
	kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
	struct bpf_htab *htab = l->htab;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
	 * we're calling kfree, otherwise deadlock is possible if kprobes
	 * are placed somewhere inside of slub
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	htab_elem_free(htab, l);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
}

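/* Called with the bucket lock held, after the element has been unlinked
 * from its hash list.
 */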
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
	if (l->state == HTAB_EXTRA_ELEM_USED) {
		l->state = HTAB_EXTRA_ELEM_FREE;
		return;
	}

	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
		pcpu_freelist_push(&htab->freelist, &l->fnode);
	} else {
		atomic_dec(&htab->count);
		l->htab = htab;
		call_rcu(&l->rcu, htab_elem_free_rcu);
	}
}

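/* onallcpus == false: update only this CPU's copy (update from a BPF
 * program).  onallcpus == true: the caller (syscall path) supplies one
 * value per possible CPU, laid out in round_up(value_size, 8) strides.
 */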
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
					value + off, size);
			off += size;
		}
	}
}

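/* Pick storage for a new element: pop from the pre-allocated freelist, or
 * kmalloc() when BPF_F_NO_PREALLOC is set.  If the table is full and an
 * existing key is being replaced, fall back to this CPU's extra element.
 */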
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
					 void *value, u32 key_size, u32 hash,
					 bool percpu, bool onallcpus,
					 bool old_elem_exists)
{
	u32 size = htab->map.value_size;
	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
	struct htab_elem *l_new;
	void __percpu *pptr;
	int err = 0;

	if (prealloc) {
		struct pcpu_freelist_node *l;

		l = pcpu_freelist_pop(&htab->freelist);
		if (!l)
			err = -E2BIG;
		else
			l_new = container_of(l, struct htab_elem, fnode);
	} else {
		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
			atomic_dec(&htab->count);
			err = -E2BIG;
		} else {
			l_new = kmalloc(htab->elem_size,
					GFP_ATOMIC | __GFP_NOWARN);
			if (!l_new)
				return ERR_PTR(-ENOMEM);
		}
	}

	if (err) {
		if (!old_elem_exists)
			return ERR_PTR(err);

		/* if we're updating the existing element and the hash table
		 * is full, use per-cpu extra elems
		 */
		l_new = this_cpu_ptr(htab->extra_elems);
		if (l_new->state != HTAB_EXTRA_ELEM_FREE)
			return ERR_PTR(-E2BIG);
		l_new->state = HTAB_EXTRA_ELEM_USED;
	} else {
		l_new->state = HTAB_NOT_AN_EXTRA_ELEM;
	}

	memcpy(l_new->key, key, key_size);
	if (percpu) {
		/* round up value_size to 8 bytes */
		size = round_up(size, 8);

661 662 663 664 665 666 667 668 669 670
		if (prealloc) {
			pptr = htab_elem_get_ptr(l_new, key_size);
		} else {
			/* alloc_percpu zero-fills */
			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
			if (!pptr) {
				kfree(l_new);
				return ERR_PTR(-ENOMEM);
			}
		}

		pcpu_copy_value(htab, pptr, value, onallcpus);

		if (!prealloc)
			htab_elem_set_ptr(l_new, key_size, pptr);
	} else {
		memcpy(l_new->key + round_up(key_size, 8), value, size);
	}

	l_new->hash = hash;
	return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
		       u64 map_flags)
{
	if (l_old && map_flags == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!l_old && map_flags == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
				!!l_old);
	if (IS_ERR(l_new)) {
		/* all pre-allocated elements are in use or memory exhausted */
		ret = PTR_ERR(l_new);
		goto err;
	}

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		hlist_nulls_del_rcu(&l_old->hash_node);
		free_htab_elem(htab, l_old);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
				    u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new, *l_old = NULL;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because getting free nodes from LRU may need
	 * to remove older elements from htab and this removal
	 * operation will need a bucket lock.
	 */
	l_new = prealloc_lru_pop(htab, key, hash);
	if (!l_new)
		return -ENOMEM;
	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	/* add new element to the head of the list, so that
	 * concurrent search will find it before old elem
	 */
	hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	if (l_old) {
		bpf_lru_node_set_ref(&l_new->lru_node);
		hlist_nulls_del_rcu(&l_old->hash_node);
	}
	ret = 0;

err:
	raw_spin_unlock_irqrestore(&b->lock, flags);

	if (ret)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	else if (l_old)
		bpf_lru_push_free(&htab->lru, &l_old->lru_node);

	return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags,
					 bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		l_new = alloc_htab_elem(htab, key, value, key_size,
					hash, true, onallcpus, false);
		if (IS_ERR(l_new)) {
			ret = PTR_ERR(l_new);
			goto err;
		}
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					     void *value, u64 map_flags,
					     bool onallcpus)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l_new = NULL, *l_old;
	struct hlist_nulls_head *head;
	unsigned long flags;
	struct bucket *b;
	u32 key_size, hash;
	int ret;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);

	b = __select_bucket(htab, hash);
	head = &b->head;

	/* For LRU, we need to alloc before taking bucket's
	 * spinlock because LRU's elem alloc may need
	 * to remove older elem from htab and this removal
	 * operation will need a bucket lock.
	 */
	if (map_flags != BPF_EXIST) {
		l_new = prealloc_lru_pop(htab, key, hash);
		if (!l_new)
			return -ENOMEM;
	}

	/* bpf_map_update_elem() can be called in_irq() */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(head, hash, key, key_size);

	ret = check_flags(htab, l_old, map_flags);
	if (ret)
		goto err;

	if (l_old) {
		bpf_lru_node_set_ref(&l_old->lru_node);

		/* per-cpu hash map can update value in-place */
		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
				value, onallcpus);
	} else {
		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
				value, onallcpus);
		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
		l_new = NULL;
	}
	ret = 0;
err:
	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l_new)
		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
	return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 map_flags)
{
	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
						 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		free_htab_elem(htab, l);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct hlist_nulls_head *head;
	struct bucket *b;
	struct htab_elem *l;
	unsigned long flags;
	u32 hash, key_size;
	int ret = -ENOENT;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key_size = map->key_size;

	hash = htab_map_hash(key, key_size);
	b = __select_bucket(htab, hash);
	head = &b->head;

	raw_spin_lock_irqsave(&b->lock, flags);

	l = lookup_elem_raw(head, hash, key, key_size);

	if (l) {
		hlist_nulls_del_rcu(&l->hash_node);
		ret = 0;
	}

	raw_spin_unlock_irqrestore(&b->lock, flags);
	if (l)
		bpf_lru_push_free(&htab->lru, &l->lru_node);
	return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
	int i;

	for (i = 0; i < htab->n_buckets; i++) {
		struct hlist_nulls_head *head = select_bucket(htab, i);
		struct hlist_nulls_node *n;
		struct htab_elem *l;

		hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
			hlist_nulls_del_rcu(&l->hash_node);
			if (l->state != HTAB_EXTRA_ELEM_USED)
				htab_elem_free(htab, l);
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void htab_map_free(struct bpf_map *map)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	/* some of free_htab_elem() callbacks for elements of this map may
	 * not have executed. Wait for them.
1046
	 */
1047
	rcu_barrier();
M
1049
		delete_all_elements(htab);
M
		prealloc_destroy(htab);

1053
	free_percpu(htab->extra_elems);
1054
	bpf_map_area_free(htab->buckets);
1055 1056 1057
	kfree(htab);
}

1058
static const struct bpf_map_ops htab_ops = {
1059 1060 1061 1062 1063 1064 1065 1066
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_map_lookup_elem,
	.map_update_elem = htab_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_type __ro_after_init = {
	.ops = &htab_ops,
	.type = BPF_MAP_TYPE_HASH,
};

static const struct bpf_map_ops htab_lru_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_map_lookup_elem,
	.map_update_elem = htab_lru_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_type __ro_after_init = {
	.ops = &htab_lru_ops,
	.type = BPF_MAP_TYPE_LRU_HASH,
};

/* Called from eBPF program */
static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l)
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	else
		return NULL;
}

static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct htab_elem *l = __htab_map_lookup_elem(map, key);

	if (l) {
		bpf_lru_node_set_ref(&l->lru_node);
		return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
	}

	return NULL;
}

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	struct htab_elem *l;
	void __percpu *pptr;
	int ret = -ENOENT;
	int cpu, off = 0;
	u32 size;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	l = __htab_map_lookup_elem(map, key);
	if (!l)
		goto out;
	if (htab_is_lru(htab))
		bpf_lru_node_set_ref(&l->lru_node);
	pptr = htab_elem_get_ptr(l, map->key_size);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 map_flags)
{
	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
	int ret;

	rcu_read_lock();
	if (htab_is_lru(htab))
		ret = __htab_lru_percpu_map_update_elem(map, key, value,
							map_flags, true);
	else
		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
						    true);
	rcu_read_unlock();

	return ret;
}

static const struct bpf_map_ops htab_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_percpu_map_lookup_elem,
	.map_update_elem = htab_percpu_map_update_elem,
	.map_delete_elem = htab_map_delete_elem,
};

static struct bpf_map_type_list htab_percpu_type __ro_after_init = {
	.ops = &htab_percpu_ops,
	.type = BPF_MAP_TYPE_PERCPU_HASH,
};

static const struct bpf_map_ops htab_lru_percpu_ops = {
	.map_alloc = htab_map_alloc,
	.map_free = htab_map_free,
	.map_get_next_key = htab_map_get_next_key,
	.map_lookup_elem = htab_lru_percpu_map_lookup_elem,
	.map_update_elem = htab_lru_percpu_map_update_elem,
	.map_delete_elem = htab_lru_map_delete_elem,
};

static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = {
	.ops = &htab_lru_percpu_ops,
	.type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
};

static int __init register_htab_map(void)
{
	bpf_register_map_type(&htab_type);
	bpf_register_map_type(&htab_percpu_type);
	bpf_register_map_type(&htab_lru_type);
	bpf_register_map_type(&htab_lru_percpu_type);
	return 0;
}
late_initcall(register_htab_map);