/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

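/* Build the map-ops table from <linux/bpf_types.h>: each BPF_MAP_TYPE(id, ops)
 * entry expands to an "[id] = &ops" initializer here, while BPF_PROG_TYPE()
 * entries expand to nothing.
 */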
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

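/* kvfree() figures out on its own whether the area came from kmalloc() or
 * vmalloc() and releases it with the matching routine, which is what lets
 * bpf_map_area_alloc() above pick either allocator.
 */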
void bpf_map_area_free(void *area)
{
	kvfree(area);
}

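/* Pre-check only: verify that 'pages' more locked pages would still fit
 * under RLIMIT_MEMLOCK for the current user, without actually charging
 * them yet.
 */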
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	if (do_idr_lock)
		spin_lock_bh(&map_idr_lock);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_bh(&map_idr_lock);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
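
/* For example, CHECK_ATTR(BPF_MAP_CREATE), with BPF_MAP_CREATE_LAST_FIELD
 * defined as inner_map_fd below, evaluates to true (and the command fails
 * with -EINVAL) when any byte of 'union bpf_attr' past inner_map_fd is
 * non-zero, i.e. when userspace set fields this kernel does not know about.
 */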

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* Failed to allocate an fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map
		 * to userspace, and userspace may have
		 * refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
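
/* A minimal userspace counterpart, as an illustration only (hypothetical
 * values, error handling omitted):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * The returned fd holds one refcnt and one usercnt reference until closed.
 */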

/* If an error is returned, the fd is released.
 * On success the caller should complete the fd access with a matching
 * fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

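	/* Per-cpu maps copy out one value slot per possible CPU, each slot
	 * rounded up to 8 bytes: e.g. (illustrative numbers only) a 12-byte
	 * value on a box with 4 possible CPUs needs
	 * round_up(12, 8) * 4 = 64 bytes of user buffer.
	 */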
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
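
/* Illustrative (hypothetical) userspace lookup, assuming 'map_fd' refers to
 * a map with 4-byte keys and 8-byte values:
 *
 *	__u32 key = 0;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */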

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside a bpf map update or delete, otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		err = -ENOMEM;
		key = kmalloc(map->key_size, GFP_USER);
		if (!key)
			goto err_put;

		err = -EFAULT;
		if (copy_from_user(key, ukey, map->key_size) != 0)
			goto free_key;
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
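
/* Userspace can walk a map by starting with attr->key == 0 (a NULL key),
 * which makes map types such as hash maps return their first key, and then
 * feeding each returned next_key back in until -ENOENT. A sketch, with
 * get_next_key() as a hypothetical wrapper around the syscall:
 *
 *	err = get_next_key(map_fd, NULL, &key);          // first key
 *	while (!err)
 *		err = get_next_key(map_fd, &key, &key);  // following keys
 */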

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* Failed to allocate an fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog
		 * to userspace, and userspace may have
		 * refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
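
/* A minimal (hypothetical) userspace sketch of BPF_PROG_LOAD, loading the
 * two-instruction program "r0 = 0; exit":
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */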

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}
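
/* e.g. (hypothetical) enumerating all loaded programs from userspace:
 *
 *	union bpf_attr attr = { .start_id = 0 };
 *	while (!syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *		attr.start_id = attr.next_id;	// visit attr.next_id here
 *
 * The loop ends when the idr holds no id greater than start_id (-ENOENT).
 */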

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

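/* Accept a larger-than-known uarg only if all trailing bytes that this
 * kernel does not understand are zero; otherwise return -E2BIG.
 */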
static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_size(prog->len);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}