/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/cred.h>
#include <linux/timekeeping.h>
#include <linux/ctype.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/*
 * If we're handed a bigger struct than we know of, ensure all the unknown bits
 * are 0 - i.e. new user-space does not rely on any kernel feature extensions
 * we don't know about yet.
 *
 * There is a ToCToU (time-of-check to time-of-use) window between this
 * check and the following copy_from_user() call. However, this is not a
 * concern, since the check only exists to future-proof against unknown
 * trailing bits.
 */
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
		return -E2BIG;

	if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size)))
		return -EFAULT;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

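/* Allocation strategy for map areas: for sizes up to the costly page
 * order, try a physically contiguous kmalloc_node() first; for larger
 * sizes, or if that attempt fails, fall back to vmalloc-backed memory.
 */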
void *bpf_map_area_alloc(size_t size, int numa_node)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc_node(size, GFP_USER | flags, numa_node);
		if (area != NULL)
			return area;
	}

	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
					   __builtin_return_address(0));
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

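/* Charge map->pages against the current user's RLIMIT_MEMLOCK allowance.
 * The pages are added to locked_vm first and the limit is checked
 * afterwards, so concurrent charges cannot both slip under the limit;
 * on failure the charge is backed out and -EPERM returned.
 */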
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

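/* Allocate a user-visible map ID cyclically in [1, INT_MAX) under
 * map_idr_lock; GFP_ATOMIC is required since the spinlock is held.
 */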
static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

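/* do_idr_lock == false is for callers that already hold map_idr_lock;
 * the __acquire()/__release() annotations keep sparse happy there.
 */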
static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	unsigned long flags;

	if (do_idr_lock)
		spin_lock_irqsave(&map_idr_lock, flags);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_irqrestore(&map_idr_lock, flags);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

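/* usercnt drops to zero once all user-space references (fds, pinned
 * files) are gone. Prog array slots are cleared at that point so that
 * programs kept alive only through the array can be freed, breaking
 * possible reference cycles between a prog array and its programs.
 */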
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero,
 * i.e. that no byte past the command's last field is non-zero
 */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

/* dst and src must each be at least BPF_OBJ_NAME_LEN bytes.
 * Return 0 on success and < 0 on error.
 */
static int bpf_obj_name_cpy(char *dst, const char *src)
{
	const char *end = src + BPF_OBJ_NAME_LEN;

	/* Copy all isalnum() and '_' chars */
	while (src < end && *src) {
		if (!isalnum(*src) && *src != '_')
			return -EINVAL;
		*dst++ = *src++;
	}

	/* No '\0' found within BPF_OBJ_NAME_LEN bytes */
	if (src == end)
		return -EINVAL;

	/* '\0' terminates dst */
	*dst = 0;

	return 0;
}

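/* Userspace view of BPF_MAP_CREATE (illustrative sketch only, error
 * handling omitted):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */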
#define BPF_MAP_CREATE_LAST_FIELD map_name
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	if (numa_node != NUMA_NO_NODE &&
	    ((unsigned int)numa_node >= nr_node_ids ||
	     !node_online(numa_node)))
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = bpf_obj_name_cpy(map->name, attr->map_name);
	if (err)
		goto free_map_nouncharge;

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to userspace, which may already have taken
		 * a reference to it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

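/* __weak default, overridden when the stack map implementation is built
 * in; otherwise lookups on BPF_MAP_TYPE_STACK_TRACE report -ENOTSUPP.
 */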
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

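/* Userspace view of BPF_MAP_LOOKUP_ELEM (illustrative sketch only):
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */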
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

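/* Charge @pages against @user's RLIMIT_MEMLOCK allowance, with the same
 * add-then-check pattern used for maps above. A NULL @user (no
 * accounting context) charges nothing and succeeds.
 */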
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

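/* Runs after an RCU grace period, i.e. once no CPU can still be
 * executing the program, so dropping its map references and freeing
 * it is safe.
 */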
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD prog_name

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	prog->aux->load_time = ktime_get_boot_ns();
	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
	if (err)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* failed to allocate fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to the userspace and the userspace may
		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

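/* Userspace view of BPF_OBJ_PIN (illustrative sketch; assumes bpffs is
 * mounted at /sys/fs/bpf):
 *
 *	union bpf_attr attr = {
 *		.bpf_fd   = prog_fd,
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_prog",
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 */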
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
{
	struct bpf_prog *prog = NULL;
	int ufd = attr->target_fd;
	struct bpf_map *map;
	struct fd f;
	int err;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (attach) {
		prog = bpf_prog_get_type(attr->attach_bpf_fd,
					 BPF_PROG_TYPE_SK_SKB);
		if (IS_ERR(prog)) {
			fdput(f);
			return PTR_ERR(prog);
		}
	}

	err = sock_map_prog(map, prog, attr->attach_type);
	if (err) {
		fdput(f);
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	fdput(f);
	return 0;
}

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr, true);
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_SOCK_OPS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		ret = sockmap_get_from_fd(attr, false);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

#endif /* CONFIG_CGROUP_BPF */

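/* BPF_PROG_TEST_RUN dispatches to the program type's ->test_run
 * callback; program types without test support report -ENOTSUPP.
 */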
#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}

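/* ID iteration protocol: userspace starts with start_id == 0 and feeds
 * each returned next_id back in as the new start_id until -ENOENT is
 * returned, at which point every currently loaded ID has been visited.
 */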
#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;
	info.load_time = prog->aux->load_time;
	info.created_by_uid = from_kuid_munged(current_user_ns(),
					       prog->aux->user->uid);

	memcpy(info.tag, prog->tag, sizeof(prog->tag));
	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));

	ulen = info.nr_map_ids;
	info.nr_map_ids = prog->aux->used_map_cnt;
	ulen = min_t(u32, info.nr_map_ids, ulen);
	if (ulen) {
		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
		u32 i;

		for (i = 0; i < ulen; i++)
			if (put_user(prog->aux->used_maps[i]->id,
				     &user_map_ids[i]))
				return -EFAULT;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;
	memcpy(info.name, map->name, sizeof(map->name));

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

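/* Main bpf(2) entry point: enforce the unprivileged-bpf sysctl, verify
 * that any attr bytes beyond what this kernel knows are zero (so newer
 * userspace degrades gracefully), copy the attributes in and dispatch
 * on cmd.
 */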
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}