/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags & ~BPF_F_NUMA_NODE ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->map.map_flags = attr->map_flags;
	array->map.numa_node = numa_node;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
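
/* A worked example of the size math above (illustrative numbers, not taken
 * from this file): a plain array with max_entries = 4 and value_size = 12
 * gets elem_size = round_up(12, 8) = 16, so array_size = sizeof(struct
 * bpf_array) + 4 * 16, and map.pages charges that rounded up to whole
 * pages. For the percpu flavor the array body only stores 4 pointers, and
 * the per-element storage (4 * 16 bytes per possible CPU) is added to
 * array_size before the pages accounting.
 */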

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
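
/* A sketch of what the emitted sequence computes, with r0-r2 standing for
 * BPF_REG_0..BPF_REG_2 (r1 = map pointer, r2 = pointer to the u32 key on
 * entry, as for any map lookup):
 *
 *	r1 += offsetof(struct bpf_array, value);
 *	r0 = *(u32 *)r2;			// index
 *	if (r0 >= map->max_entries)
 *		r0 = 0;				// NULL
 *	else
 *		r0 = r1 + r0 * elem_size;	// LSH instead of MUL when
 *						// elem_size is a power of 2
 */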

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
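
/* A note on the implied userspace ABI (an inference from the loop above,
 * not spelled out in this file): the value buffer passed to a
 * BPF_MAP_LOOKUP_ELEM syscall on a percpu array must be
 * round_up(value_size, 8) * num_possible_cpus() bytes, since one
 * rounded-up slot is written per possible CPU; bpf_percpu_array_update()
 * below expects the same layout on update.
 */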

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
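
/* The semantics above make userspace iteration straightforward. A minimal
 * sketch, assuming libbpf's bpf_map_get_next_key() wrapper and an open map
 * fd (any out-of-range start key, e.g. -1, yields the first index):
 *
 *	__u32 key = -1, next_key;
 *	while (!bpf_map_get_next_key(fd, &key, &next_key)) {
 *		// ... operate on next_key (0, 1, ..., max_entries - 1) ...
 *		key = next_key;
 *	}
 */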

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding programs
	 * to complete and free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
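
/* Context, not part of this file: prog arrays are the map type behind the
 * bpf_tail_call() helper. A program does, in effect,
 *
 *	bpf_tail_call(ctx, &prog_array_map, index);
 *
 * and execution jumps to array->ptrs[index] without returning, which is
 * why prog_fd_array_get_ptr() above insists the stored program is
 * bpf_prog_array_compatible() with the map.
 */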

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};
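
/* Context, not part of this file: perf event arrays are the maps that
 * helpers such as bpf_perf_event_output() and bpf_perf_event_read() index
 * into. Each slot holds a bpf_event_entry wrapping a perf_event file, and
 * map_release (perf_event_fd_array_release() above) purges the entries
 * installed through a map file when that file is closed, so stored events
 * do not outlive their installer.
 */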

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
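
/* As with array_map_gen_lookup(), a sketch of the emitted sequence (same
 * register conventions): each element stores a pointer to the inner map,
 * so one extra 8-byte load is emitted:
 *
 *	r0 = *(u32 *)r2;			// index
 *	if (r0 >= map->max_entries) {
 *		r0 = 0;				// NULL
 *	} else {
 *		r0 = *(void **)(r1 + offsetof(struct bpf_array, value) +
 *				r0 * elem_size);
 *		// r0 may still be NULL for an empty slot
 *	}
 */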

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};