/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/security.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>

#define YAMA_SCOPE_DISABLED	0
#define YAMA_SCOPE_RELATIONAL	1
#define YAMA_SCOPE_CAPABILITY	2
#define YAMA_SCOPE_NO_ATTACH	3

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
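
/*
 * The scope is runtime-tunable through the kernel.yama.ptrace_scope sysctl
 * registered below when CONFIG_SYSCTL is enabled.  Illustrative usage from a
 * root shell, assuming the usual procfs mount point:
 *
 *	# cat /proc/sys/kernel/yama/ptrace_scope
 *	1
 *	# echo 2 > /proc/sys/kernel/yama/ptrace_scope
 *
 * 0 (DISABLED) keeps classic ptrace permissions, 1 (RELATIONAL) limits
 * PTRACE_ATTACH to descendants and registered exceptions, 2 (CAPABILITY)
 * requires CAP_SYS_PTRACE, and 3 (NO_ATTACH) forbids attaching entirely.
 */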

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
	struct task_struct *tracer;
	struct task_struct *tracee;
	struct list_head node;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
			    struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *added;
	struct ptrace_relation *entry, *relation = NULL;

	added = kmalloc(sizeof(*added), GFP_KERNEL);
	if (!added)
		return -ENOMEM;

	spin_lock_bh(&ptracer_relations_lock);
	list_for_each_entry(entry, &ptracer_relations, node)
		if (entry->tracee == tracee) {
			relation = entry;
			break;
		}
	if (!relation) {
		relation = added;
		relation->tracee = tracee;
		list_add(&relation->node, &ptracer_relations);
	}
	relation->tracer = tracer;

	spin_unlock_bh(&ptracer_relations_lock);
	if (added != relation)
		kfree(added);

	return rc;
}
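
/*
 * Illustrative effect of the replace semantics above (userspace view,
 * hypothetical pids): after
 *
 *	prctl(PR_SET_PTRACER, pid_a, 0, 0, 0);
 *	prctl(PR_SET_PTRACER, pid_b, 0, 0, 0);
 *
 * only pid_b may attach to the caller; the earlier exception for pid_a has
 * been overwritten, since each tracee holds at most one entry in the list.
 */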

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
			     struct task_struct *tracee)
{
	struct ptrace_relation *relation, *safe;

	spin_lock_bh(&ptracer_relations_lock);
	list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
		if (relation->tracee == tracee ||
		    (tracer && relation->tracer == tracer)) {
			list_del(&relation->node);
			kfree(relation);
		}
	spin_unlock_bh(&ptracer_relations_lock);
}

/**
 * yama_task_free - remove any ptrace exceptions held for an exiting task
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
	yama_ptracer_del(task, task);
}

/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, unsigned long arg5)
{
	int rc;
	struct task_struct *myself = current;

	rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
	if (rc != -ENOSYS)
		return rc;

	switch (option) {
	case PR_SET_PTRACER:
		/* Any thread in a group can call prctl(), so find the group
		 * leader before calling _add() or _del() on it; we want
		 * process-level granularity of control. The tracer's group
		 * leader is checked later, while walking the ancestry at
		 * PTRACE_ATTACH time.
		 */
		rcu_read_lock();
		if (!thread_group_leader(myself))
			myself = rcu_dereference(myself->group_leader);
		get_task_struct(myself);
		rcu_read_unlock();

		if (arg2 == 0) {
			yama_ptracer_del(NULL, myself);
			rc = 0;
		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
			rc = yama_ptracer_add(NULL, myself);
		} else {
			struct task_struct *tracer;

			rcu_read_lock();
			tracer = find_task_by_vpid(arg2);
			if (tracer)
				get_task_struct(tracer);
			else
				rc = -EINVAL;
			rcu_read_unlock();

			if (tracer) {
				rc = yama_ptracer_add(tracer, myself);
				put_task_struct(tracer);
			}
		}

		put_task_struct(myself);
		break;
	}

	return rc;
}
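
/*
 * Minimal userspace sketch of the PR_SET_PTRACER interface handled above
 * (illustrative only, not part of this file): a process that expects its
 * parent, for example a crash handler, to attach to it can opt in with:
 *
 *	#include <sys/prctl.h>
 *	#include <unistd.h>
 *
 *	prctl(PR_SET_PTRACER, (unsigned long)getppid(), 0, 0, 0);
 *
 * Passing PR_SET_PTRACER_ANY in place of the pid allows any process to
 * attach, and an argument of 0 clears a previously registered exception.
 */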

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
			      struct task_struct *child)
{
	int rc = 0;
	struct task_struct *walker = child;

	if (!parent || !child)
		return 0;

	rcu_read_lock();
	if (!thread_group_leader(parent))
		parent = rcu_dereference(parent->group_leader);
	while (walker->pid > 0) {
		if (!thread_group_leader(walker))
			walker = rcu_dereference(walker->group_leader);
		if (walker == parent) {
			rc = 1;
			break;
		}
		walker = rcu_dereference(walker->real_parent);
	}
	rcu_read_unlock();

	return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer has a ptracer exception ancestor for tracee.
 */
static int ptracer_exception_found(struct task_struct *tracer,
				   struct task_struct *tracee)
{
	int rc = 0;
	struct ptrace_relation *relation;
	struct task_struct *parent = NULL;
	bool found = false;

	spin_lock_bh(&ptracer_relations_lock);
	rcu_read_lock();
	if (!thread_group_leader(tracee))
		tracee = rcu_dereference(tracee->group_leader);
	list_for_each_entry(relation, &ptracer_relations, node)
		if (relation->tracee == tracee) {
			parent = relation->tracer;
			found = true;
			break;
		}

	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
		rc = 1;
	rcu_read_unlock();
	spin_unlock_bh(&ptracer_relations_lock);

	return rc;
}

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_access_check(struct task_struct *child,
				    unsigned int mode)
{
	int rc;

	/* If standard caps disallow it, so does Yama.  We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_access_check(child, mode);
	if (rc)
		return rc;

	/* require ptrace target be a child of ptracer on attach */
	if (mode == PTRACE_MODE_ATTACH) {
		switch (ptrace_scope) {
		case YAMA_SCOPE_DISABLED:
			/* No additional restrictions. */
			break;
		case YAMA_SCOPE_RELATIONAL:
			if (!task_is_descendant(current, child) &&
			    !ptracer_exception_found(current, child) &&
			    !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
				rc = -EPERM;
			break;
		case YAMA_SCOPE_CAPABILITY:
			if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
				rc = -EPERM;
			break;
		case YAMA_SCOPE_NO_ATTACH:
		default:
			rc = -EPERM;
			break;
		}
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptrace of pid %d was attempted by: %s (pid %d)\n",
			child->pid, current->comm, current->pid);
	}

	return rc;
}
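
/*
 * Concrete effect at the default YAMA_SCOPE_RELATIONAL setting
 * (illustrative): attaching to an unrelated process, e.g.
 *
 *	gdb -p <pid>    or    strace -p <pid>
 *
 * fails with EPERM unless the caller is an ancestor of the target, holds
 * CAP_SYS_PTRACE in the target's user namespace, or was registered via
 * PR_SET_PTRACER; the denial is reported by the ratelimited message above.
 */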

/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
	int rc;

	/* If standard caps disallow it, so does Yama.  We should
	 * only tighten restrictions further.
	 */
	rc = cap_ptrace_traceme(parent);
	if (rc)
		return rc;

	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
	switch (ptrace_scope) {
	case YAMA_SCOPE_CAPABILITY:
		if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
			rc = -EPERM;
		break;
	case YAMA_SCOPE_NO_ATTACH:
		rc = -EPERM;
		break;
	}

	if (rc) {
		printk_ratelimited(KERN_NOTICE
			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
			current->pid, parent->comm, parent->pid);
	}

	return rc;
}
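
/*
 * Illustrative consequence for the classic fork-and-trace pattern: under
 * YAMA_SCOPE_NO_ATTACH (and under YAMA_SCOPE_CAPABILITY when the prospective
 * parent lacks CAP_SYS_PTRACE), a child calling
 *
 *	ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *
 * fails with EPERM, so debuggers that fork their target themselves are
 * refused as well, not only PTRACE_ATTACH callers.
 */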

#ifndef CONFIG_SECURITY_YAMA_STACKED
static struct security_operations yama_ops = {
	.name =			"yama",

	.ptrace_access_check =	yama_ptrace_access_check,
	.ptrace_traceme =	yama_ptrace_traceme,
	.task_prctl =		yama_task_prctl,
	.task_free =		yama_task_free,
};
#endif

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc;

	if (write && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (rc)
		return rc;

	/* Lock the max value if it ever gets set. */
	if (write && *(int *)table->data == *(int *)table->extra2)
		table->extra1 = table->extra2;

	return rc;
}
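
/*
 * Locking example (illustrative): once a writer with CAP_SYS_PTRACE sets the
 * sysctl to its maximum,
 *
 *	echo 3 > /proc/sys/kernel/yama/ptrace_scope
 *
 * extra1 is pointed at extra2, so any later attempt to lower the value is
 * rejected by proc_dointvec_minmax() with -EINVAL until reboot.
 */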

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
	{ .procname = "kernel", },
	{ .procname = "yama", },
	{ }
};

static struct ctl_table yama_sysctl_table[] = {
	{
		.procname       = "ptrace_scope",
		.data           = &ptrace_scope,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = yama_dointvec_minmax,
		.extra1         = &zero,
		.extra2         = &max_scope,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (!security_module_enable(&yama_ops))
		return 0;
#endif

	printk(KERN_INFO "Yama: becoming mindful.\n");

#ifndef CONFIG_SECURITY_YAMA_STACKED
	if (register_security(&yama_ops))
		panic("Yama: kernel registration failed.\n");
#endif

#ifdef CONFIG_SYSCTL
	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
		panic("Yama: sysctl registration failed.\n");
#endif

	return 0;
}

security_initcall(yama_init);