/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */
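/*
 * Illustrative userspace sketch (not part of this file or of the kernel
 * build): a mode-2 filter is typically installed with the classic-BPF
 * macros from the uapi headers, roughly as follows.  The example filter
 * allows every system call; real filters inspect seccomp_data->arch and
 * ->nr before returning an action.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * no_new_privs (or CAP_SYS_ADMIN) is required by seccomp_prepare_filter()
 * below before any filter may be attached.
 */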

#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}

/**
 *	seccomp_check_filter - verify seccomp filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			lockless_dereference(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
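/*
 * Worked example of the precedence rule above, relying on the numeric
 * ordering of the action values (SECCOMP_RET_KILL < TRAP < ERRNO <
 * TRACE < LOG < ALLOW): if one filter in the list returns
 * SECCOMP_RET_ERRNO | EPERM while another returns SECCOMP_RET_ALLOW,
 * seccomp_run_filters() reports the ERRNO action, since its action
 * value is numerically lower; the DATA bits of the losing return value
 * are discarded along with it.
 */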
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or it did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 *
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&orig->usage);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
#endif	/* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL		(1 << 0)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL  | SECCOMP_LOG_TRAP  |
				    SECCOMP_LOG_ERRNO | SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;

static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL, true);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
			 const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
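/*
 * Illustrative userspace sketch (not part of the kernel build): libc has
 * historically provided no wrapper for this syscall, so callers usually
 * reach it through syscall(2), e.g.
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL);
 *
 * or, to install a filter across every thread of the caller at once:
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *
 * where &prog is a struct sock_fprog as in the sketch at the top of this
 * file.  When TSYNC fails, the return value is the ID of the thread that
 * could not be synchronized (see seccomp_can_sync_threads() above).
 */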

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	get_seccomp_filter(task);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	put_seccomp_filter(task);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
#endif
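/*
 * Illustrative sketch (not part of the kernel build): seccomp_get_filter()
 * above backs the PTRACE_SECCOMP_GET_FILTER request, so a
 * checkpoint/restore-style tracer might dump a tracee's classic BPF
 * programs roughly like this, with filter_off selecting which filter in
 * the tracee's stack to dump:
 *
 *	long len = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, filter_off, NULL);
 *	struct sock_filter *insns = calloc(len, sizeof(*insns));
 *	ptrace(PTRACE_SECCOMP_GET_FILTER, pid, filter_off, insns);
 *
 * A NULL data pointer only reports the instruction count, since the
 * copy_to_user() above is skipped in that case; the tracer itself must
 * hold CAP_SYS_ADMIN and must not be running under seccomp, per the
 * check at the top of the function.
 */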

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_NAME		"kill"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] = SECCOMP_RET_KILL_NAME	" "
					    SECCOMP_RET_TRAP_NAME	" "
					    SECCOMP_RET_ERRNO_NAME	" "
					    SECCOMP_RET_TRACE_NAME	" "
					    SECCOMP_RET_LOG_NAME	" "
					    SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL, SECCOMP_RET_KILL_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}
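/*
 * Illustrative sketch: with the table below registered, an administrator
 * can inspect and adjust the set of logged actions from userspace
 * (assuming the usual procfs mount point), e.g.
 *
 *	cat /proc/sys/kernel/seccomp/actions_avail
 *	echo "kill trap errno trace log" > /proc/sys/kernel/seccomp/actions_logged
 *
 * Writes require CAP_SYS_ADMIN, and "allow" is rejected by the handler
 * above since SECCOMP_RET_ALLOW results are never logged.
 */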

static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};

static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};

static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */