/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

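/* Check whether the current user could lock @pages more pages without
 * exceeding RLIMIT_MEMLOCK; nothing is actually charged here.
 */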
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

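/* Charge the map's pages to the current user's locked memory and remember
 * the user so the charge can be undone when the map is destroyed.
 */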
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

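/* Drop a user-space reference; once the last one is gone, a prog array
 * map has its entries cleared.
 */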
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

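/* Contents of /proc/<pid>/fdinfo/<fd> for map fds */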
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

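/* Install a new O_RDWR | O_CLOEXEC anon inode fd that takes over the
 * caller's map reference; bpf_map_release() drops it when the fd is closed.
 */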
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

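/* Grab another reference, failing with -EBUSY once BPF_MAX_REFCNT is
 * reached so the refcount cannot overflow; @uref also bumps the
 * user-space count.
 */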
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

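	/* per-cpu maps return one value per possible CPU, each rounded up
	 * to 8 bytes, so size the buffer accordingly
	 */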
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);

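/* Look up the registered ops for @type and bind them to the program */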
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

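/* Charge @pages of locked memory against @user's RLIMIT_MEMLOCK, undoing
 * the charge and returning -EPERM if the limit would be exceeded.  A NULL
 * user is a no-op.
 */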
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

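/* Account the program's pages to the current user and remember the user
 * for the matching uncharge when the program is freed.
 */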
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

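/* RCU callback run after the last program reference is dropped: release
 * the maps the program used, return the memlock charge and free it.
 */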
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

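/* Contents of /proc/<pid>/fdinfo/<fd> for program fds */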
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

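/* Install a new O_RDWR | O_CLOEXEC anon inode fd that takes over the
 * caller's program reference; bpf_prog_release() puts it on close.
 */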
int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

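/* Map a struct fd onto the bpf_prog it refers to: -EBADF for an empty fd,
 * -EINVAL (after fdput) if the file is not a BPF program.
 */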
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

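/* Take @i extra references on the program, failing with -EBUSY when the
 * BPF_MAX_REFCNT overflow limit would be crossed.
 */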
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

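/* Resolve @ufd to a program, optionally checking that it is of the expected
 * type, and take a reference that the caller drops with bpf_prog_put().
 */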
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD kern_version

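/* Called for the BPF_PROG_LOAD command: copy the program from user space,
 * run the verifier, pick the runtime (JIT or interpreter) and return an fd.
 */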
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

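/* Attach a cgroup-skb or cgroup-sock program to the cgroup referenced by
 * attr->target_fd for the requested attach type.
 */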
static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

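/* bpf(2) syscall entry point: validate and copy the attribute union from
 * user space, then dispatch on @cmd.
 */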
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;

#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif

	default:
		err = -EINVAL;
		break;
	}

	return err;
}