/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

#define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)

DEFINE_PER_CPU(int, bpf_prog_active);

static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a TOCTOU (time-of-check to time-of-use) race between this check
 * and the following copy_from_user() call. That is not a concern here, since
 * the check only future-proofs against new, unknown bits being set.
 */
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

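/* Resolve attr->map_type to its registered bpf_map_ops and let that
 * backend allocate and size the new map.
 */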
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

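/* Map areas use a two-tier allocation strategy: sizes up to the page
 * allocator's costly-order threshold are tried with kmalloc first, and
 * larger requests (or a failed kmalloc) fall back to vmalloc.
 */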
void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

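/* Map memory is charged against the owning user's RLIMIT_MEMLOCK.
 * bpf_map_precharge_memlock() only checks that the requested pages would
 * fit under the limit; bpf_map_charge_memlock() commits the charge.
 */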
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

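/* Map IDs are handed out cyclically from [1, INT_MAX) so that recently
 * freed IDs are not reused right away; user space can walk them with
 * BPF_MAP_GET_NEXT_ID and open them with BPF_MAP_GET_FD_BY_ID.
 */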
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	security_bpf_map_free(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

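/* A map carries two counters: refcnt covers all references, while usercnt
 * only covers user-space-facing ones (fds, pinned files). Once the last
 * user reference is gone, a prog_array is cleared so it stops holding
 * programs alive.
 */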
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
			      loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_READ.
	 */
	return -EINVAL;
}

static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
			       size_t siz, loff_t *ppos)
{
	/* We need this handler such that alloc_file() enables
	 * f_mode with FMODE_CAN_WRITE.
	 */
	return -EINVAL;
}

const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_map_new_fd(struct bpf_map *map, int flags)
{
	int ret;

	ret = security_bpf_map(map, OPEN_FMODE(flags));
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				flags | O_CLOEXEC);
}

int bpf_get_file_flag(int flags)
{
	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
		return -EINVAL;
	if (flags & BPF_F_RDONLY)
		return O_RDONLY;
	if (flags & BPF_F_WRONLY)
		return O_WRONLY;
	return O_RDWR;
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
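/* For example, CHECK_ATTR(BPF_MAP_CREATE) with BPF_MAP_CREATE_LAST_FIELD
 * set to map_name evaluates to true if any byte of the union past map_name
 * is non-zero, letting callers reject attributes this kernel doesn't know.
 */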

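/* Object names may only contain alphanumeric characters and '_', and must
 * be NUL-terminated within BPF_OBJ_NAME_LEN bytes: "my_map_1" is fine,
 * "my-map" or an unterminated name is rejected with -EINVAL.
 */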
/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	memset(dst, 0, BPF_OBJ_NAME_LEN);

	/* Copy all isalnum() and '_' chars */
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
	if (src == end)
		return -EINVAL;

	return 0;
}

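/* Illustrative user-space invocation (not part of this file):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */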
#define BPF_MAP_CREATE_LAST_FIELD map_name
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int f_flags;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	f_flags = bpf_get_file_flag(attr->map_flags);
	if (f_flags < 0)
		return f_flags;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = security_bpf_map_alloc(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_sec;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map, f_flags);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_sec:
	security_bpf_map_free(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

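/* Illustrative user-space invocation (not part of this file):
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */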
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* Need to create a kthread, thus must support schedule */
	if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
	}

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
out:
	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_WRITE)) {
		err = -EPERM;
		goto err_put;
	}

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (!(f.file->f_mode & FMODE_CAN_READ)) {
		err = -EPERM;
		goto err_put;
	}

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	if (!bpf_prog_is_dev_bound(prog->aux))
		prog->aux->ops = bpf_prog_types[type];
	else
		prog->aux->ops = &bpf_offload_prog_ops;
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

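/* Programs may still be running on other CPUs when the last reference is
 * dropped, so the final teardown is deferred past an RCU grace period.
 */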
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
	.read		= bpf_dummy_read,
	.write		= bpf_dummy_write,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	int ret;

	ret = security_bpf_prog(prog);
	if (ret < 0)
		return ret;

	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

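/* Gatekeeper for handing out program references: the program type must
 * match the requested attach type, and device-bound programs may only be
 * attached through a driver that understands them.
 */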
static bool bpf_prog_get_ok(struct bpf_prog *prog,
			    enum bpf_prog_type *attach_type, bool attach_drv)
{
	/* not an attachment, just a refcount inc, always allow */
	if (!attach_type)
		return true;

	if (prog->type != *attach_type)
		return false;
	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
		return false;

	return true;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
				       bool attach_drv)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL, false);
}

struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);

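/* Program load pipeline: validate attributes and license, charge memlock,
 * copy instructions from user space, run the verifier, pick the runtime
 * (JIT or interpreter), allocate an ID and finally hand back an fd.
 */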
/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD prog_ifindex

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = security_bpf_prog_alloc(prog->aux);
	if (err)
		goto free_prog_nouncharge;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_sec;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	if (attr->prog_ifindex) {
		err = bpf_prog_offload_init(prog, attr);
		if (err)
			goto free_prog;
	}

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boot_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
	if (err)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_sec:
	security_bpf_prog_free(prog->aux);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

#define BPF_OBJ_LAST_FIELD file_flags

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
				attr->file_flags);
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

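/* Attach or detach a BPF_PROG_TYPE_SK_SKB program on a sockmap; on detach
 * (attach == false) a NULL prog is passed down to clear the slot.
 */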
static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
{
	struct bpf_prog *prog = NULL;
	int ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (attach) {
		prog = bpf_prog_get_type(attr->attach_bpf_fd,
					 BPF_PROG_TYPE_SK_SKB);
		if (IS_ERR(prog)) {
			fdput(f);
			return PTR_ERR(prog);
		}
	}

	err = sock_map_prog(map, prog, attr->attach_type);
	if (err) {
		fdput(f);
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	fdput(f);
	return 0;
}

#define BPF_F_ATTACH_MASK \
	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, true);
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_CGROUP_DEVICE:
		ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, false);
	default:
		return -EINVAL;
	}

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);
	cgroup_put(cgrp);
	return ret;
}

#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt

static int bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (CHECK_ATTR(BPF_PROG_QUERY))
		return -EINVAL;
	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
		return -EINVAL;

	switch (attr->query.attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_SOCK_OPS:
	case BPF_CGROUP_DEVICE:
		break;
	default:
		return -EINVAL;
	}
	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	ret = cgroup_bpf_query(cgrp, attr, uattr);
	cgroup_put(cgrp);
	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

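/* ID enumeration: user space passes the last ID it saw in start_id and
 * gets the next live ID back, so a full walk is a loop of
 * BPF_{PROG,MAP}_GET_NEXT_ID calls starting from 0 until -ENOENT.
 */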
#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int f_flags;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	f_flags = bpf_get_file_flag(attr->open_flags);
	if (f_flags < 0)
		return f_flags;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map, f_flags);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

1685 1686 1687 1688 1689
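/* Single entry point for all BPF commands: validate and copy the
 * attribute union from user space, run the LSM hook, then dispatch on cmd.
 */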
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	err = security_bpf(cmd, &attr, size);
	if (err < 0)
		return err;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
	case BPF_PROG_QUERY:
		err = bpf_prog_query(&attr, uattr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}