arraymap.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}
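
/* Illustrative sketch (not part of this file): the attribute checks above
 * are driven by a BPF_MAP_CREATE command from user space. Field names
 * follow union bpf_attr from uapi/linux/bpf.h; the raw syscall wrapper is
 * just one way to issue the command.
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,            // must be exactly 4
 *		.value_size  = sizeof(long), // rounded up to 8 internally
 *		.max_entries = 256,          // all elements pre-allocated
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */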

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}
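
/* Illustrative sketch: from an eBPF program this lookup is reached via the
 * bpf_map_lookup_elem() helper; the pointer it returns (when non-NULL)
 * points straight into the pre-allocated value area computed above.
 * "my_array" is a hypothetical map definition.
 *
 *	u32 key = 0;
 *	long *val = bpf_map_lookup_elem(&my_array, &key);
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 */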

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
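
/* The sequence emitted above corresponds to this pseudo-listing, with R0
 * holding the result, R1 the map pointer and R2 the pointer to the key:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)              // index
 *	if r0 >= map->max_entries goto +3  // out of bounds -> NULL
 *	r0 <<= ilog2(elem_size)            // or r0 *= elem_size
 *	r0 += r1                           // &array->value[index * elem_size]
 *	goto +1
 *	r0 = 0                             // NULL
 */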

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
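
/* Illustrative sketch: user space reading a per-cpu element must pass a
 * buffer with one round_up(value_size, 8) slot per possible CPU, matching
 * the copy loop above. How num_cpus is obtained is left open (e.g. a
 * libbpf or sysconf query); the names here are hypothetical.
 *
 *	u32 stride = (value_size + 7) & ~7u;
 *	void *buf = malloc((size_t)num_cpus * stride);
 *	// BPF_MAP_LOOKUP_ELEM then fills one stride per possible CPU
 */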

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
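
/* Illustrative sketch: the contract above lets user space walk every index
 * by starting from an out-of-range key (which yields index 0) and looping
 * until the last index reports -ENOENT. bpf_map_get_next_key() is assumed
 * to be a thin wrapper around the BPF_MAP_GET_NEXT_KEY command.
 *
 *	u32 key = 0xffffffff, next;
 *	while (bpf_map_get_next_key(map_fd, &key, &next) == 0) {
 *		// process index "next"
 *		key = next;
 *	}
 */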

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}
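
/* Illustrative sketch of the flag semantics above: every slot of an array
 * map exists from creation, so BPF_NOEXIST can never succeed; only BPF_ANY
 * and BPF_EXIST are usable. bpf_map_update_elem() is assumed to wrap the
 * BPF_MAP_UPDATE_ELEM command.
 *
 *	long v = 42;
 *	bpf_map_update_elem(map_fd, &key, &v, BPF_ANY);     // ok
 *	bpf_map_update_elem(map_fd, &key, &v, BPF_NOEXIST); // always -EEXIST
 */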

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space provides round_up(value_size, 8) bytes that are
	 * copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes are
	 * returned, or the zeros left there by percpu_alloc, so no
	 * kernel data can leak.
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs that used this map (there can be more than one)
	 * were disconnected from events. Wait for outstanding programs to
	 * complete and free the array.
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}
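
/* Illustrative sketch: the main user of this path is BPF_MAP_TYPE_PROG_ARRAY
 * for tail calls. User space stores a program fd as the value and
 * ->map_fd_get_ptr() above turns it into a bpf_prog pointer. Names such as
 * "jmp_table" are hypothetical.
 *
 *	u32 slot = 0;
 *	bpf_map_update_elem(jmp_table_fd, &slot, &prog_fd, BPF_ANY);
 *	// inside a BPF program: bpf_tail_call(ctx, &jmp_table, slot);
 */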

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}
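
/* Illustrative sketch: a slot validated by the checks above is what the
 * bpf_perf_event_output() helper consumes; a program picks a ring buffer
 * by map index (or BPF_F_CURRENT_CPU) and pushes a sample into it.
 * "events" is a hypothetical perf event array map.
 *
 *	u64 sample = 1;
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &sample, sizeof(sample));
 */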

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = fd_array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
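
/* Illustrative sketch: with BPF_MAP_TYPE_ARRAY_OF_MAPS a program performs a
 * two-level lookup; the first bpf_map_lookup_elem() returns the inner map
 * pointer published above via READ_ONCE(). Map and key names are
 * hypothetical.
 *
 *	void *inner = bpf_map_lookup_elem(&outer_array, &outer_key);
 *	if (inner) {
 *		long *val = bpf_map_lookup_elem(inner, &inner_key);
 *		if (val)
 *			(*val)++;
 *	}
 */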

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
};