syscall.c
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
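
/* Worked example (illustrative numbers): with RLIMIT_MEMLOCK = 64 MB and
 * 4 KB pages, memlock_limit above is 16384 pages, so precharging 20000
 * pages fails with -EPERM before any allocation is attempted.
 */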

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
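
/* For instance, CHECK_ATTR(BPF_MAP_CREATE) expands (via the #define
 * below) to:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. it is true when any byte past the last field this command knows
 * about is non-zero.
 */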

#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
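
/* A minimal user-space invocation of this command might look like
 * (sketch; 'syscall' and '__NR_bpf' come from the usual libc headers,
 * the field values are illustrative):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */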

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
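
/* A kernel-internal user of the above would typically pair it with
 * bpf_map_put_with_uref(), e.g. (sketch, 'ufd' supplied by user space):
 *
 *	struct bpf_map *map = bpf_map_get_with_uref(ufd);
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	...
 *	bpf_map_put_with_uref(map);
 */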

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
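
/* The matching user-space call (sketch; the casts are needed because
 * 'key' and 'value' in 'union bpf_attr' are u64-encoded pointers):
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */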

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
        .release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
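
/* Minimal user-space load of a do-nothing socket filter (sketch;
 * BPF_MOV64_IMM()/BPF_EXIT_INSN() are the insn-building macros as used
 * in samples/bpf, shown here for illustration only):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	   (r0 = 0, i.e. drop)
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */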

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}