/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/idr.h>

#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			   (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
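/* The IS_FD_* helpers above identify "fd maps": maps whose elements
 * refer to other kernel objects (programs, perf events, cgroups or
 * inner maps). Userspace passes file descriptors on update; lookups,
 * where supported, copy out a u32 id rather than a kernel pointer.
 */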

DEFINE_PER_CPU(int, bpf_prog_active);
static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
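/* bpf_map_types[] is generated from <linux/bpf_types.h>: each
 * BPF_MAP_TYPE(_id, _ops) entry there expands to an ops pointer indexed
 * by its map type, while BPF_PROG_TYPE() entries expand to nothing.
 * find_and_alloc_map() below dispatches on attr->map_type after a
 * bounds check against this table.
 */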

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}
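	/* kmalloc() failed or the size exceeds the costly-order limit:
	 * fall back to vmalloc(), which does not need physically
	 * contiguous pages.
	 */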

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
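
/* Unlike the precharge check above, bpf_map_charge_memlock() below
 * actually commits map->pages against the user's RLIMIT_MEMLOCK and
 * records the user for the matching uncharge at free time.
 */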

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

static int bpf_map_alloc_id(struct bpf_map *map)
{
	int id;

	spin_lock_bh(&map_idr_lock);
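	/* cyclic allocation in [1, INT_MAX) avoids handing out a just
	 * freed ID again right away, which keeps the *_GET_FD_BY_ID
	 * interfaces below less prone to ID reuse confusion
	 */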
	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		map->id = id;
	spin_unlock_bh(&map_idr_lock);

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{
	if (do_idr_lock)
		spin_lock_bh(&map_idr_lock);
	else
		__acquire(&map_idr_lock);

	idr_remove(&map_idr, map->id);

	if (do_idr_lock)
		spin_unlock_bh(&map_idr_lock);
	else
		__release(&map_idr_lock);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
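	/* usercnt counts references held by user space (fds and pinned
	 * files); refcnt also covers kernel-internal users. Clearing a
	 * prog_array on the last user reference breaks reference cycles
	 * between tail-call maps and the programs stored in them, so
	 * both can eventually be freed.
	 */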
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		/* bpf_map_free_id() must be called first */
		bpf_map_free_id(map, do_idr_lock);
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put(struct bpf_map *map)
{
	__bpf_map_put(map, true);
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;
	u32 owner_jited = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
		owner_jited = array->owner_jited;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type) {
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
		seq_printf(m, "owner_jited:\t%u\n",
			   owner_jited);
	}
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
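/* i.e. memchr_inv() scans the bytes between the end of the command's
 * last known field and the end of union bpf_attr; a non-zero byte there
 * means a newer userspace set a field this kernel does not understand,
 * and the command is rejected with -EINVAL.
 */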

#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_alloc_id(map);
	if (err)
		goto free_map;

	err = bpf_map_new_fd(map);
	if (err < 0) {
		/* Failed to allocate an fd.
		 * bpf_map_put() is needed because the above
		 * bpf_map_alloc_id() has published the map to
		 * userspace, which may already have taken a
		 * reference to it via BPF_MAP_GET_FD_BY_ID.
		 */
		bpf_map_put(map);
		return err;
	}

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
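
/* A minimal userspace sketch (not part of this file) of the matching
 * call, assuming <linux/bpf.h> and raw syscall(2) access:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 64,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success fd refers to the new map and can be used with the
 * BPF_MAP_*_ELEM commands below.
 */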

/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* map_idr_lock should have been held */
static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
					    bool uref)
{
	int refold;

	refold = __atomic_add_unless(&map->refcnt, 1, 0);
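	/* refold is the old count: __atomic_add_unless() only increments
	 * when the count was non-zero, so a map whose last reference is
	 * already gone cannot be resurrected here.
	 */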

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	if (uref)
		atomic_inc(&map->usercnt);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else if (IS_FD_MAP(map))
		value_size = sizeof(u32);
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (IS_FD_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete; otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
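	/* bpf_prog_active is consulted by the trace/kprobe entry points:
	 * while it is non-zero on this CPU, attached BPF programs are
	 * skipped, so a probe firing inside the map code below cannot
	 * recursively enter it.
	 */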
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static int bpf_prog_alloc_id(struct bpf_prog *prog)
{
	int id;

	spin_lock_bh(&prog_idr_lock);
	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		prog->aux->id = id;
	spin_unlock_bh(&prog_idr_lock);

	/* id is in [1, INT_MAX) */
	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
{
	/* cBPF to eBPF migrations are currently not in the idr store. */
	if (!prog->aux->id)
		return;

	if (do_idr_lock)
		spin_lock_bh(&prog_idr_lock);
	else
		__acquire(&prog_idr_lock);

	idr_remove(&prog_idr, prog->aux->id);

	if (do_idr_lock)
		spin_unlock_bh(&prog_idr_lock);
	else
		__release(&prog_idr_lock);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		/* bpf_prog_free_id() must be called first */
		bpf_prog_free_id(prog, do_idr_lock);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	__bpf_prog_put(prog, true);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

/* prog_idr_lock should have been held */
static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	int refold;

	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		__bpf_prog_put(prog, false);
		return ERR_PTR(-EBUSY);
	}

	if (!refold)
		return ERR_PTR(-ENOENT);

	return prog;
}

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
921 922
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define	BPF_PROG_LOAD_LAST_FIELD prog_flags

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
	    type != BPF_PROG_TYPE_CGROUP_SKB &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_alloc_id(prog);
	if (err)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0) {
		/* Failed to allocate an fd.
		 * bpf_prog_put() is needed because the above
		 * bpf_prog_alloc_id() has published the prog to
		 * userspace, which may already have taken a
		 * reference to it via BPF_PROG_GET_FD_BY_ID.
		 */
		bpf_prog_put(prog);
		return err;
	}

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
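
/* A minimal userspace sketch (not part of this file) of loading a
 * trivial program, assuming the BPF_* insn macros from <linux/filter.h>
 * and raw syscall(2) access:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0 (return value)
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */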

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	case BPF_CGROUP_SOCK_OPS:
		ptype = BPF_PROG_TYPE_SOCK_OPS;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
	case BPF_CGROUP_SOCK_OPS:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}
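
/* Only program types whose ops implement ->test_run support this
 * command (at this point essentially the networking types such as
 * sched_cls/sched_act and XDP); everything else gets -ENOTSUPP.
 */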

#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id

static int bpf_obj_get_next_id(const union bpf_attr *attr,
			       union bpf_attr __user *uattr,
			       struct idr *idr,
			       spinlock_t *lock)
{
	u32 next_id = attr->start_id;
	int err = 0;

	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	next_id++;
	spin_lock_bh(lock);
	if (!idr_get_next(idr, &next_id))
		err = -ENOENT;
	spin_unlock_bh(lock);

	if (!err)
		err = put_user(next_id, &uattr->next_id);

	return err;
}
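
/* Userspace enumerates all loaded objects by starting with start_id == 0
 * and feeding each returned next_id back in as the new start_id until
 * -ENOENT comes back.
 */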

#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	u32 id = attr->prog_id;
	int fd;

	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&prog_idr_lock);
	prog = idr_find(&prog_idr, id);
	if (prog)
		prog = bpf_prog_inc_not_zero(prog);
	else
		prog = ERR_PTR(-ENOENT);
	spin_unlock_bh(&prog_idr_lock);

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	fd = bpf_prog_new_fd(prog);
	if (fd < 0)
		bpf_prog_put(prog);

	return fd;
}

#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id

static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
{
	struct bpf_map *map;
	u32 id = attr->map_id;
	int fd;

	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	spin_lock_bh(&map_idr_lock);
	map = idr_find(&map_idr, id);
	if (map)
		map = bpf_map_inc_not_zero(map, true);
	else
		map = ERR_PTR(-ENOENT);
	spin_unlock_bh(&map_idr_lock);

	if (IS_ERR(map))
		return PTR_ERR(map);

	fd = bpf_map_new_fd(map);
	if (fd < 0)
		bpf_map_put(map);

	return fd;
}

static int check_uarg_tail_zero(void __user *uaddr,
				size_t expected_size,
				size_t actual_size)
{
	unsigned char __user *addr;
	unsigned char __user *end;
	unsigned char val;
	int err;

	if (actual_size <= expected_size)
		return 0;

	addr = uaddr + expected_size;
	end  = uaddr + actual_size;

	for (; addr < end; addr++) {
		err = get_user(val, addr);
		if (err)
			return err;
		if (val)
			return -E2BIG;
	}

	return 0;
}
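
/* This is the forward-compatibility counterpart of CHECK_ATTR(): a newer
 * userspace may hand in a larger struct than this kernel knows about,
 * which is accepted as long as every unknown trailing byte is zero.
 */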

static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_prog_info info = {};
	u32 info_len = attr->info.info_len;
	char __user *uinsns;
	u32 ulen;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = prog->type;
	info.id = prog->aux->id;

	memcpy(info.tag, prog->tag, sizeof(prog->tag));

	if (!capable(CAP_SYS_ADMIN)) {
		info.jited_prog_len = 0;
		info.xlated_prog_len = 0;
		goto done;
	}

	ulen = info.jited_prog_len;
	info.jited_prog_len = prog->jited_len;
	if (info.jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.jited_prog_insns);
		ulen = min_t(u32, info.jited_prog_len, ulen);
		if (copy_to_user(uinsns, prog->bpf_func, ulen))
			return -EFAULT;
	}

	ulen = info.xlated_prog_len;
	info.xlated_prog_len = bpf_prog_insn_size(prog);
	if (info.xlated_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
		ulen = min_t(u32, info.xlated_prog_len, ulen);
		if (copy_to_user(uinsns, prog->insnsi, ulen))
			return -EFAULT;
	}

done:
	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

static int bpf_map_get_info_by_fd(struct bpf_map *map,
				  const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_map_info info = {};
	u32 info_len = attr->info.info_len;
	int err;

	err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	info.type = map->map_type;
	info.id = map->id;
	info.key_size = map->key_size;
	info.value_size = map->value_size;
	info.max_entries = map->max_entries;
	info.map_flags = map->map_flags;

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
				  union bpf_attr __user *uattr)
{
	int ufd = attr->info.bpf_fd;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
		return -EINVAL;

	f = fdget(ufd);
	if (!f.file)
		return -EBADFD;

	if (f.file->f_op == &bpf_prog_fops)
		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
					      uattr);
	else if (f.file->f_op == &bpf_map_fops)
		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
					     uattr);
	else
		err = -EINVAL;

	fdput(f);
	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
	if (err)
		return err;
	size = min_t(u32, size, sizeof(attr));

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);
		break;
	case BPF_PROG_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &prog_idr, &prog_idr_lock);
		break;
	case BPF_MAP_GET_NEXT_ID:
		err = bpf_obj_get_next_id(&attr, uattr,
					  &map_idr, &map_idr_lock);
		break;
	case BPF_PROG_GET_FD_BY_ID:
		err = bpf_prog_get_fd_by_id(&attr);
		break;
	case BPF_MAP_GET_FD_BY_ID:
		err = bpf_map_get_fd_by_id(&attr);
		break;
	case BPF_OBJ_GET_INFO_BY_FD:
		err = bpf_obj_get_info_by_fd(&attr, uattr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}