/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */

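/*
 * Illustrative userspace sketch (not part of this file): a task would
 * typically enter mode 2 by building a classic BPF program over
 * struct seccomp_data and handing it to the seccomp(2) or prctl(2)
 * entry points defined at the bottom of this file, e.g.:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *
 * A production filter would also check seccomp_data->arch; error
 * handling is omitted here.
 */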
#include <linux/refcount.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};

/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
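/* i.e. 262144 / 8 == 32768 eight-byte classic BPF instructions along a path. */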

/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}

/**
 *	seccomp_check_filter - verify seccomp filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
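
/*
 * Worked example (illustrative only): a word-sized load of the arch field,
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, arch)),
 *
 * passes the checks above (offset 4 is 32-bit aligned and in bounds) and is
 * rewritten to a BPF_LDX | BPF_W | BPF_ABS load, while a byte-wide load such
 * as BPF_LD | BPF_B | BPF_ABS, or any other opcode not listed, is rejected
 * with -EINVAL.
 */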

/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 * @match: stores struct seccomp_filter that resulted in the return value,
 *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 *         be unchanged.
 *
 * Returns valid seccomp BPF response codes.
 */
#define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
static u32 seccomp_run_filters(const struct seccomp_data *sd,
			       struct seccomp_filter **match)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			lockless_dereference(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL_PROCESS;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
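	/*
	 * For example, if one attached filter returns SECCOMP_RET_ALLOW and
	 * another returns SECCOMP_RET_ERRNO | EPERM, the ERRNO action is the
	 * one reported (and *match set to that filter), since its action
	 * value compares lower than SECCOMP_RET_ALLOW.
	 */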
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, sd);

		if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
			ret = cur_ret;
			*match = f;
		}
	}
	return ret;
}
#endif /* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	set_tsk_thread_flag(task, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}

/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 *
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}

/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	refcount_set(&sfilter->usage, 1);

	return sfilter;
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns 0 on success and non-zero otherwise.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/* Set log flag, if present. */
	if (flags & SECCOMP_FILTER_FLAG_LOG)
		filter->log = true;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	refcount_inc(&orig->usage);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && refcount_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGSYS;
	info->si_code = SYS_SECCOMP;
	info->si_call_addr = (void __user *)KSTK_EIP(current);
	info->si_errno = reason;
	info->si_arch = syscall_get_arch();
	info->si_syscall = syscall;
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
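
/*
 * Illustrative userspace counterpart (not built here): a process relying on
 * SECCOMP_RET_TRAP would install a SIGSYS handler with SA_SIGINFO and act on
 * the fields filled in above, e.g.:
 *
 *	static void handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		if (info->si_code == SYS_SECCOMP)
 *			emulate(info->si_syscall, info->si_errno);
 *	}
 *
 * where emulate() is a hypothetical helper acting on the trapped syscall
 * number and the filter-supplied reason code.
 */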
#endif	/* CONFIG_SECCOMP_FILTER */

/* For use with seccomp_actions_logged */
#define SECCOMP_LOG_KILL_PROCESS	(1 << 0)
#define SECCOMP_LOG_KILL_THREAD		(1 << 1)
#define SECCOMP_LOG_TRAP		(1 << 2)
#define SECCOMP_LOG_ERRNO		(1 << 3)
#define SECCOMP_LOG_TRACE		(1 << 4)
#define SECCOMP_LOG_LOG			(1 << 5)
#define SECCOMP_LOG_ALLOW		(1 << 6)

static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
				    SECCOMP_LOG_KILL_THREAD  |
				    SECCOMP_LOG_TRAP  |
				    SECCOMP_LOG_ERRNO |
				    SECCOMP_LOG_TRACE |
				    SECCOMP_LOG_LOG;

static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
			       bool requested)
{
	bool log = false;

	switch (action) {
	case SECCOMP_RET_ALLOW:
		break;
	case SECCOMP_RET_TRAP:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
		break;
	case SECCOMP_RET_ERRNO:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
		break;
	case SECCOMP_RET_TRACE:
		log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
		break;
	case SECCOMP_RET_LOG:
		log = seccomp_actions_logged & SECCOMP_LOG_LOG;
		break;
	case SECCOMP_RET_KILL_THREAD:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
		break;
	case SECCOMP_RET_KILL_PROCESS:
	default:
		log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
	}

	/*
	 * Force an audit message to be emitted when the action is RET_KILL_*,
	 * RET_LOG, or the FILTER_FLAG_LOG bit was set and the action is
	 * allowed to be logged by the admin.
	 */
	if (log)
		return __audit_seccomp(syscall, signr, action);

	/*
	 * Let the audit subsystem decide if the action should be audited based
	 * on whether the current task itself is being audited.
	 */
	return audit_seccomp(syscall, signr, action);
}

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;
#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
	do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	struct seccomp_filter *match = NULL;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd, &match);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION_FULL;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_LOG:
		seccomp_log(this_syscall, 0, action, true);
		return 0;

	case SECCOMP_RET_ALLOW:
		/*
		 * Note that the "match" filter will always be NULL for
		 * this action since SECCOMP_RET_ALLOW is the starting
		 * state in seccomp_run_filters().
		 */
		return 0;

	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_KILL_PROCESS:
	default:
		seccomp_log(this_syscall, SIGSYS, action, true);
		/* Dump core only if this is the last remaining thread. */
		if (action == SECCOMP_RET_KILL_PROCESS ||
		    get_nr_threads(current) == 1) {
			siginfo_t info;

			/* Show the original registers in the dump. */
			syscall_rollback(current, task_pt_regs(current));
			/* Trigger a manual coredump since do_exit skips it. */
			seccomp_init_siginfo(&info, this_syscall, data);
			do_coredump(&info);
		}
		if (action == SECCOMP_RET_KILL_PROCESS)
			do_group_exit(SIGSYS);
		else
			do_exit(SIGSYS);
	}

	unreachable();

skip:
	seccomp_log(this_syscall, 0, action, match ? match->log : false);
	return -1;
}
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif

int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall);  /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

static long seccomp_get_action_avail(const char __user *uaction)
{
	u32 action;

	if (copy_from_user(&action, uaction, sizeof(action)))
		return -EFAULT;

	switch (action) {
	case SECCOMP_RET_KILL_PROCESS:
	case SECCOMP_RET_KILL_THREAD:
	case SECCOMP_RET_TRAP:
	case SECCOMP_RET_ERRNO:
	case SECCOMP_RET_TRACE:
	case SECCOMP_RET_LOG:
	case SECCOMP_RET_ALLOW:
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	case SECCOMP_GET_ACTION_AVAIL:
		if (flags != 0)
			return -EINVAL;

		return seccomp_get_action_avail(uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
			 const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}

#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	get_seccomp_filter(task);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	put_seccomp_filter(task);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
#endif

#ifdef CONFIG_SYSCTL

/* Human readable action names for friendly sysctl interaction */
#define SECCOMP_RET_KILL_PROCESS_NAME	"kill_process"
#define SECCOMP_RET_KILL_THREAD_NAME	"kill_thread"
#define SECCOMP_RET_TRAP_NAME		"trap"
#define SECCOMP_RET_ERRNO_NAME		"errno"
#define SECCOMP_RET_TRACE_NAME		"trace"
#define SECCOMP_RET_LOG_NAME		"log"
#define SECCOMP_RET_ALLOW_NAME		"allow"

static const char seccomp_actions_avail[] =
				SECCOMP_RET_KILL_PROCESS_NAME	" "
				SECCOMP_RET_KILL_THREAD_NAME	" "
				SECCOMP_RET_TRAP_NAME		" "
				SECCOMP_RET_ERRNO_NAME		" "
				SECCOMP_RET_TRACE_NAME		" "
				SECCOMP_RET_LOG_NAME		" "
				SECCOMP_RET_ALLOW_NAME;

struct seccomp_log_name {
	u32		log;
	const char	*name;
};

static const struct seccomp_log_name seccomp_log_names[] = {
	{ SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
	{ SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
	{ SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
	{ SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
	{ SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
	{ SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
	{ SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
	{ }
};

static bool seccomp_names_from_actions_logged(char *names, size_t size,
					      u32 actions_logged)
{
	const struct seccomp_log_name *cur;
	bool append_space = false;

	for (cur = seccomp_log_names; cur->name && size; cur++) {
		ssize_t ret;

		if (!(actions_logged & cur->log))
			continue;

		if (append_space) {
			ret = strscpy(names, " ", size);
			if (ret < 0)
				return false;

			names += ret;
			size -= ret;
		} else
			append_space = true;

		ret = strscpy(names, cur->name, size);
		if (ret < 0)
			return false;

		names += ret;
		size -= ret;
	}

	return true;
}

static bool seccomp_action_logged_from_name(u32 *action_logged,
					    const char *name)
{
	const struct seccomp_log_name *cur;

	for (cur = seccomp_log_names; cur->name; cur++) {
		if (!strcmp(cur->name, name)) {
			*action_logged = cur->log;
			return true;
		}
	}

	return false;
}

static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
{
	char *name;

	*actions_logged = 0;
	while ((name = strsep(&names, " ")) && *name) {
		u32 action_logged = 0;

		if (!seccomp_action_logged_from_name(&action_logged, name))
			return false;

		*actions_logged |= action_logged;
	}

	return true;
}

static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	char names[sizeof(seccomp_actions_avail)];
	struct ctl_table table;
	int ret;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(names, 0, sizeof(names));

	if (!write) {
		if (!seccomp_names_from_actions_logged(names, sizeof(names),
						       seccomp_actions_logged))
			return -EINVAL;
	}

	table = *ro_table;
	table.data = names;
	table.maxlen = sizeof(names);
	ret = proc_dostring(&table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		u32 actions_logged;

		if (!seccomp_actions_logged_from_names(&actions_logged,
						       table.data))
			return -EINVAL;

		if (actions_logged & SECCOMP_LOG_ALLOW)
			return -EINVAL;

		seccomp_actions_logged = actions_logged;
	}

	return 0;
}

static struct ctl_path seccomp_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "seccomp", },
	{ }
};

static struct ctl_table seccomp_sysctl_table[] = {
	{
		.procname	= "actions_avail",
		.data		= (void *) &seccomp_actions_avail,
		.maxlen		= sizeof(seccomp_actions_avail),
		.mode		= 0444,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "actions_logged",
		.mode		= 0644,
		.proc_handler	= seccomp_actions_logged_handler,
	},
	{ }
};

static int __init seccomp_sysctl_init(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
	if (!hdr)
		pr_warn("seccomp: sysctl registration failed\n");
	else
		kmemleak_not_leak(hdr);

	return 0;
}

device_initcall(seccomp_sysctl_init)

#endif /* CONFIG_SYSCTL */