// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Security plug functions
 *
 * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
 * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
 * Copyright (C) 2016 Mellanox Technologies
 */

#define pr_fmt(fmt) "LSM: " fmt

#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/lsm_hooks.h>
#include <linux/integrity.h>
#include <linux/ima.h>
#include <linux/evm.h>
#include <linux/fsnotify.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/msg.h>
#include <net/flow.h>

#define MAX_LSM_EVM_XATTR	2

/* How many LSMs were built into the kernel? */
#define LSM_COUNT (__end_lsm_info - __start_lsm_info)

/*
 * These are descriptions of the reasons that can be passed to the
 * security_locked_down() LSM hook. Placing this array here allows
 * all security modules to use the same descriptions for auditing
 * purposes.
 */
const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
	[LOCKDOWN_NONE] = "none",
	[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
	[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
	[LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
	[LOCKDOWN_KEXEC] = "kexec of unsigned images",
	[LOCKDOWN_HIBERNATION] = "hibernation",
	[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
	[LOCKDOWN_IOPORT] = "raw io port access",
	[LOCKDOWN_MSR] = "raw MSR access",
	[LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
	[LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
	[LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
	[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
	[LOCKDOWN_DEBUGFS] = "debugfs access",
	[LOCKDOWN_XMON_WR] = "xmon write access",
	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
	[LOCKDOWN_KCORE] = "/proc/kcore access",
	[LOCKDOWN_KPROBES] = "use of kprobes",
	[LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
	[LOCKDOWN_PERF] = "unsafe use of perf",
	[LOCKDOWN_TRACEFS] = "use of tracefs",
	[LOCKDOWN_XMON_RW] = "xmon read and write access",
	[LOCKDOWN_XFRM_SECRET] = "xfrm SA secret",
	[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
};
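/*
 * Illustrative use (a sketch, not part of this file): an LSM that implements
 * the locked_down hook can report the shared human-readable reason with
 * something like:
 *
 *	pr_notice("Lockdown: %s: %s is restricted\n",
 *		  current->comm, lockdown_reasons[what]);
 */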

struct security_hook_heads security_hook_heads __lsm_ro_after_init;
static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);

static struct kmem_cache *lsm_file_cache;
static struct kmem_cache *lsm_inode_cache;

char *lsm_names;
static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;

/* Boot-time LSM user choice */
static __initdata const char *chosen_lsm_order;
static __initdata const char *chosen_major_lsm;

static __initconst const char * const builtin_lsm_order = CONFIG_LSM;

/* Ordered list of LSMs to initialize. */
static __initdata struct lsm_info **ordered_lsms;
static __initdata struct lsm_info *exclusive;

static __initdata bool debug;
#define init_debug(...)						\
	do {							\
		if (debug)					\
			pr_info(__VA_ARGS__);			\
	} while (0)

static bool __init is_enabled(struct lsm_info *lsm)
{
	if (!lsm->enabled)
		return false;

	return *lsm->enabled;
}

/* Mark an LSM's enabled flag. */
static int lsm_enabled_true __initdata = 1;
static int lsm_enabled_false __initdata = 0;
static void __init set_enabled(struct lsm_info *lsm, bool enabled)
{
	/*
	 * When an LSM hasn't configured an enable variable, we can use
	 * a hard-coded location for storing the default enabled state.
	 */
	if (!lsm->enabled) {
		if (enabled)
			lsm->enabled = &lsm_enabled_true;
		else
			lsm->enabled = &lsm_enabled_false;
	} else if (lsm->enabled == &lsm_enabled_true) {
		if (!enabled)
			lsm->enabled = &lsm_enabled_false;
	} else if (lsm->enabled == &lsm_enabled_false) {
		if (enabled)
			lsm->enabled = &lsm_enabled_true;
	} else {
		*lsm->enabled = enabled;
	}
}

/* Is an LSM already listed in the ordered LSMs list? */
static bool __init exists_ordered_lsm(struct lsm_info *lsm)
{
	struct lsm_info **check;

	for (check = ordered_lsms; *check; check++)
		if (*check == lsm)
			return true;

	return false;
}

/* Append an LSM to the list of ordered LSMs to initialize. */
static int last_lsm __initdata;
static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
{
	/* Ignore duplicate selections. */
	if (exists_ordered_lsm(lsm))
		return;

	if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
		return;

	/* Enable this LSM, if it is not already set. */
	if (!lsm->enabled)
		lsm->enabled = &lsm_enabled_true;
	ordered_lsms[last_lsm++] = lsm;

	init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
		   is_enabled(lsm) ? "en" : "dis");
}

/* Is an LSM allowed to be initialized? */
static bool __init lsm_allowed(struct lsm_info *lsm)
{
	/* Skip if the LSM is disabled. */
	if (!is_enabled(lsm))
		return false;

	/* Not allowed if another exclusive LSM already initialized. */
	if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
		init_debug("exclusive disabled: %s\n", lsm->name);
		return false;
	}

	return true;
}

static void __init lsm_set_blob_size(int *need, int *lbs)
{
	int offset;

	if (*need > 0) {
		offset = *lbs;
		*lbs += *need;
		*need = offset;
	}
}

static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
{
	if (!needed)
		return;

	lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
	lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
	/*
	 * The inode blob gets an rcu_head in addition to
	 * what the modules might need.
	 */
	if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
		blob_sizes.lbs_inode = sizeof(struct rcu_head);
	lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
	lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
	lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
	lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
	lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
}
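/*
 * Sketch of how the offsets recorded above are consumed (illustrative only,
 * names are hypothetical): after lsm_set_blob_sizes() runs, each module's
 * lbs_* field holds that module's offset into the shared blob, so a module
 * typically wraps the lookup like:
 *
 *	static inline struct example_cred *example_cred(const struct cred *cred)
 *	{
 *		return cred->security + example_blob_sizes.lbs_cred;
 *	}
 */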

/* Prepare LSM for initialization. */
static void __init prepare_lsm(struct lsm_info *lsm)
{
	int enabled = lsm_allowed(lsm);

	/* Record enablement (to handle any following exclusive LSMs). */
	set_enabled(lsm, enabled);

	/* If enabled, do pre-initialization work. */
	if (enabled) {
		if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
			exclusive = lsm;
			init_debug("exclusive chosen: %s\n", lsm->name);
		}

		lsm_set_blob_sizes(lsm->blobs);
	}
}

/* Initialize a given LSM, if it is enabled. */
static void __init initialize_lsm(struct lsm_info *lsm)
{
	if (is_enabled(lsm)) {
		int ret;

		init_debug("initializing %s\n", lsm->name);
		ret = lsm->init();
		WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
	}
}

/* Populate ordered LSMs list from comma-separated LSM name list. */
static void __init ordered_lsm_parse(const char *order, const char *origin)
{
	struct lsm_info *lsm;
	char *sep, *name, *next;

	/* LSM_ORDER_FIRST is always first. */
	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
		if (lsm->order == LSM_ORDER_FIRST)
			append_ordered_lsm(lsm, "first");
	}

	/* Process "security=", if given. */
	if (chosen_major_lsm) {
		struct lsm_info *major;

		/*
		 * To match the original "security=" behavior, this
		 * explicitly does NOT fallback to another Legacy Major
		 * if the selected one was separately disabled: disable
		 * all non-matching Legacy Major LSMs.
		 */
		for (major = __start_lsm_info; major < __end_lsm_info;
		     major++) {
			if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
			    strcmp(major->name, chosen_major_lsm) != 0) {
				set_enabled(major, false);
				init_debug("security=%s disabled: %s\n",
					   chosen_major_lsm, major->name);
			}
		}
	}

	sep = kstrdup(order, GFP_KERNEL);
	next = sep;
	/* Walk the list, looking for matching LSMs. */
	while ((name = strsep(&next, ",")) != NULL) {
		bool found = false;

		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
			if (lsm->order == LSM_ORDER_MUTABLE &&
			    strcmp(lsm->name, name) == 0) {
				append_ordered_lsm(lsm, origin);
				found = true;
			}
		}

		if (!found)
			init_debug("%s ignored: %s\n", origin, name);
	}

	/* Process "security=", if given. */
	if (chosen_major_lsm) {
		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
			if (exists_ordered_lsm(lsm))
				continue;
			if (strcmp(lsm->name, chosen_major_lsm) == 0)
				append_ordered_lsm(lsm, "security=");
		}
	}

	/* Disable all LSMs not in the ordered list. */
	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
		if (exists_ordered_lsm(lsm))
			continue;
		set_enabled(lsm, false);
		init_debug("%s disabled: %s\n", origin, lsm->name);
	}

	kfree(sep);
}

static void __init lsm_early_cred(struct cred *cred);
static void __init lsm_early_task(struct task_struct *task);

static int lsm_append(const char *new, char **result);

static void __init ordered_lsm_init(void)
{
	struct lsm_info **lsm;

	ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
				GFP_KERNEL);

	if (chosen_lsm_order) {
		if (chosen_major_lsm) {
			pr_info("security= is ignored because it is superseded by lsm=\n");
			chosen_major_lsm = NULL;
		}
		ordered_lsm_parse(chosen_lsm_order, "cmdline");
	} else
		ordered_lsm_parse(builtin_lsm_order, "builtin");

	for (lsm = ordered_lsms; *lsm; lsm++)
		prepare_lsm(*lsm);

	init_debug("cred blob size       = %d\n", blob_sizes.lbs_cred);
	init_debug("file blob size       = %d\n", blob_sizes.lbs_file);
	init_debug("inode blob size      = %d\n", blob_sizes.lbs_inode);
	init_debug("ipc blob size        = %d\n", blob_sizes.lbs_ipc);
	init_debug("msg_msg blob size    = %d\n", blob_sizes.lbs_msg_msg);
	init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
	init_debug("task blob size       = %d\n", blob_sizes.lbs_task);

	/*
	 * Create any kmem_caches needed for blobs
	 */
	if (blob_sizes.lbs_file)
		lsm_file_cache = kmem_cache_create("lsm_file_cache",
						   blob_sizes.lbs_file, 0,
						   SLAB_PANIC, NULL);
	if (blob_sizes.lbs_inode)
		lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
						    blob_sizes.lbs_inode, 0,
						    SLAB_PANIC, NULL);

	lsm_early_cred((struct cred *) current->cred);
	lsm_early_task(current);
	for (lsm = ordered_lsms; *lsm; lsm++)
		initialize_lsm(*lsm);

	kfree(ordered_lsms);
}

int __init early_security_init(void)
{
	int i;
	struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
	struct lsm_info *lsm;

	for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
	     i++)
		INIT_HLIST_HEAD(&list[i]);

	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
		if (!lsm->enabled)
			lsm->enabled = &lsm_enabled_true;
		prepare_lsm(lsm);
		initialize_lsm(lsm);
	}

	return 0;
}

/**
 * security_init - initializes the security framework
 *
 * This should be called early in the kernel initialization sequence.
 */
int __init security_init(void)
{
	struct lsm_info *lsm;

	pr_info("Security Framework initializing\n");

	/*
	 * Append the names of the early LSM modules now that kmalloc() is
	 * available
	 */
	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
		if (lsm->enabled)
			lsm_append(lsm->name, &lsm_names);
	}

	/* Load LSMs in specified order. */
	ordered_lsm_init();

	return 0;
}

/* Save user chosen LSM */
static int __init choose_major_lsm(char *str)
{
	chosen_major_lsm = str;
	return 1;
}
__setup("security=", choose_major_lsm);

/* Explicitly choose LSM initialization order. */
static int __init choose_lsm_order(char *str)
{
	chosen_lsm_order = str;
	return 1;
}
__setup("lsm=", choose_lsm_order);

/* Enable LSM order debugging. */
static int __init enable_debug(char *str)
{
	debug = true;
	return 1;
}
__setup("lsm.debug", enable_debug);

static bool match_last_lsm(const char *list, const char *lsm)
{
	const char *last;

	if (WARN_ON(!list || !lsm))
		return false;
	last = strrchr(list, ',');
	if (last)
		/* Pass the comma, strcmp() will check for '\0' */
		last++;
	else
		last = list;
	return !strcmp(last, lsm);
}

static int lsm_append(const char *new, char **result)
{
	char *cp;

	if (*result == NULL) {
		*result = kstrdup(new, GFP_KERNEL);
		if (*result == NULL)
			return -ENOMEM;
	} else {
		/* Check if it is the last registered name */
		if (match_last_lsm(*result, new))
			return 0;
		cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
		if (cp == NULL)
			return -ENOMEM;
		kfree(*result);
		*result = cp;
	}
	return 0;
}

/**
 * security_add_hooks - Add a module's hooks to the hook lists.
 * @hooks: the hooks to add
 * @count: the number of hooks to add
 * @lsm: the name of the security module
 *
 * Each LSM has to register its hooks with the infrastructure.
 */
void __init security_add_hooks(struct security_hook_list *hooks, int count,
				char *lsm)
{
	int i;

	for (i = 0; i < count; i++) {
		hooks[i].lsm = lsm;
		hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
	}

	/*
	 * Don't try to append during early_security_init(), we'll come back
	 * and fix this up afterwards.
	 */
	if (slab_is_available()) {
		if (lsm_append(lsm, &lsm_names) < 0)
			panic("%s - Cannot get early memory.\n", __func__);
	}
}
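/*
 * Registration sketch (illustrative, not part of this file): an LSM builds
 * its hook table with LSM_HOOK_INIT() and calls security_add_hooks() from
 * its init routine, e.g. for a hypothetical "example" module:
 *
 *	static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
 *		LSM_HOOK_INIT(file_open, example_file_open),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
 *				   "example");
 *		return 0;
 *	}
 */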

int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
	return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
					    event, data);
}
EXPORT_SYMBOL(call_blocking_lsm_notifier);

int register_blocking_lsm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
						nb);
}
EXPORT_SYMBOL(register_blocking_lsm_notifier);

int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
						  nb);
}
EXPORT_SYMBOL(unregister_blocking_lsm_notifier);

/**
 * lsm_cred_alloc - allocate a composite cred blob
 * @cred: the cred that needs a blob
 * @gfp: allocation type
 *
 * Allocate the cred blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
{
	if (blob_sizes.lbs_cred == 0) {
		cred->security = NULL;
		return 0;
	}

	cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
	if (cred->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_early_cred - during initialization allocate a composite cred blob
 * @cred: the cred that needs a blob
 *
 * Allocate the cred blob for all the modules
 */
static void __init lsm_early_cred(struct cred *cred)
{
	int rc = lsm_cred_alloc(cred, GFP_KERNEL);

	if (rc)
		panic("%s: Early cred alloc failed.\n", __func__);
}

/**
 * lsm_file_alloc - allocate a composite file blob
 * @file: the file that needs a blob
 *
 * Allocate the file blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_file_alloc(struct file *file)
{
	if (!lsm_file_cache) {
		file->f_security = NULL;
		return 0;
	}

	file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
	if (file->f_security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_inode_alloc - allocate a composite inode blob
 * @inode: the inode that needs a blob
 *
 * Allocate the inode blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
int lsm_inode_alloc(struct inode *inode)
{
	if (!lsm_inode_cache) {
		inode->i_security = NULL;
		return 0;
	}

	inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
	if (inode->i_security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_task_alloc - allocate a composite task blob
 * @task: the task that needs a blob
 *
 * Allocate the task blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_task_alloc(struct task_struct *task)
{
	if (blob_sizes.lbs_task == 0) {
		task->security = NULL;
		return 0;
	}

	task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
	if (task->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_ipc_alloc - allocate a composite ipc blob
 * @kip: the ipc that needs a blob
 *
 * Allocate the ipc blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
{
	if (blob_sizes.lbs_ipc == 0) {
		kip->security = NULL;
		return 0;
	}

	kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
	if (kip->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_msg_msg_alloc - allocate a composite msg_msg blob
 * @mp: the msg_msg that needs a blob
 *
 * Allocate the msg_msg blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_msg_msg_alloc(struct msg_msg *mp)
{
	if (blob_sizes.lbs_msg_msg == 0) {
		mp->security = NULL;
		return 0;
	}

	mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
	if (mp->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_early_task - during initialization allocate a composite task blob
 * @task: the task that needs a blob
 *
 * Allocate the task blob for all the modules
 */
static void __init lsm_early_task(struct task_struct *task)
{
	int rc = lsm_task_alloc(task);

	if (rc)
		panic("%s: Early task alloc failed.\n", __func__);
}

/**
 * lsm_superblock_alloc - allocate a composite superblock blob
 * @sb: the superblock that needs a blob
 *
 * Allocate the superblock blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_superblock_alloc(struct super_block *sb)
{
	if (blob_sizes.lbs_superblock == 0) {
		sb->s_security = NULL;
		return 0;
	}

	sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL);
	if (sb->s_security == NULL)
		return -ENOMEM;
	return 0;
}

/*
 * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
 * can be accessed with:
 *
 *	LSM_RET_DEFAULT(<hook_name>)
 *
 * The macros below define static constants for the default value of each
 * LSM hook.
 */
#define LSM_RET_DEFAULT(NAME) (NAME##_default)
#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME)
#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \
	static const int LSM_RET_DEFAULT(NAME) = (DEFAULT);
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
	DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME)

#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
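/*
 * For example (illustrative): a hook declared in lsm_hook_defs.h as
 *
 *	LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, ...)
 *
 * expands above to
 *
 *	static const int inode_getsecurity_default = -EOPNOTSUPP;
 *
 * so LSM_RET_DEFAULT(inode_getsecurity) evaluates to -EOPNOTSUPP.
 */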

/*
 * Hook list operation macros.
 *
 * call_void_hook:
 *	This is a hook that does not return a value.
 *
 * call_int_hook:
 *	This is a hook that returns a value.
 */

#define call_void_hook(FUNC, ...)				\
	do {							\
		struct security_hook_list *P;			\
								\
		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
			P->hook.FUNC(__VA_ARGS__);		\
	} while (0)

#define call_int_hook(FUNC, IRC, ...) ({			\
	int RC = IRC;						\
	do {							\
		struct security_hook_list *P;			\
								\
		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
			RC = P->hook.FUNC(__VA_ARGS__);		\
			if (RC != 0)				\
				break;				\
		}						\
	} while (0);						\
	RC;							\
})
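/*
 * Usage sketch (illustrative): most of the wrappers below reduce to a single
 * call_int_hook() with the hook's default as the initial return code, e.g.:
 *
 *	int security_example_op(struct file *file)
 *	{
 *		return call_int_hook(file_example_op, 0, file);
 *	}
 *
 * The loop stops at the first module that returns a non-zero value.
 */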

/* Security operations */

749 750
int security_binder_set_context_mgr(struct task_struct *mgr)
{
751
	return call_int_hook(binder_set_context_mgr, 0, mgr);
752 753 754 755 756
}

int security_binder_transaction(struct task_struct *from,
				struct task_struct *to)
{
757
	return call_int_hook(binder_transaction, 0, from, to);
758 759 760 761 762
}

int security_binder_transfer_binder(struct task_struct *from,
				    struct task_struct *to)
{
763
	return call_int_hook(binder_transfer_binder, 0, from, to);
764 765 766 767 768
}

int security_binder_transfer_file(struct task_struct *from,
				  struct task_struct *to, struct file *file)
{
769
	return call_int_hook(binder_transfer_file, 0, from, to, file);
770 771
}

772
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
773
{
774
	return call_int_hook(ptrace_access_check, 0, child, mode);
775 776 777 778
}

int security_ptrace_traceme(struct task_struct *parent)
{
779
	return call_int_hook(ptrace_traceme, 0, parent);
780 781 782 783 784 785 786
}

int security_capget(struct task_struct *target,
		     kernel_cap_t *effective,
		     kernel_cap_t *inheritable,
		     kernel_cap_t *permitted)
{
787 788
	return call_int_hook(capget, 0, target,
				effective, inheritable, permitted);
789 790
}

D
David Howells 已提交
791 792 793 794
int security_capset(struct cred *new, const struct cred *old,
		    const kernel_cap_t *effective,
		    const kernel_cap_t *inheritable,
		    const kernel_cap_t *permitted)
795
{
796 797
	return call_int_hook(capset, 0, new, old,
				effective, inheritable, permitted);
798 799
}

800 801 802 803
int security_capable(const struct cred *cred,
		     struct user_namespace *ns,
		     int cap,
		     unsigned int opts)
804
{
805
	return call_int_hook(capable, 0, cred, ns, cap, opts);
806 807 808 809
}

int security_quotactl(int cmds, int type, int id, struct super_block *sb)
{
810
	return call_int_hook(quotactl, 0, cmds, type, id, sb);
811 812 813 814
}

int security_quota_on(struct dentry *dentry)
{
815
	return call_int_hook(quota_on, 0, dentry);
816 817
}

818
int security_syslog(int type)
819
{
820
	return call_int_hook(syslog, 0, type);
821 822
}

823
int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
824
{
825
	return call_int_hook(settime, 0, ts, tz);
826 827 828 829
}

int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
C
Casey Schaufler 已提交
830 831 832 833 834 835 836 837 838 839 840
	struct security_hook_list *hp;
	int cap_sys_admin = 1;
	int rc;

	/*
	 * The module will respond with a positive value if
	 * it thinks the __vm_enough_memory() call should be
	 * made with the cap_sys_admin set. If all of the modules
	 * agree that it should be set it will. If any module
	 * thinks it should not be set it won't.
	 */
841
	hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
C
Casey Schaufler 已提交
842 843 844 845 846 847 848
		rc = hp->hook.vm_enough_memory(mm, pages);
		if (rc <= 0) {
			cap_sys_admin = 0;
			break;
		}
	}
	return __vm_enough_memory(mm, pages, cap_sys_admin);
849 850
}

851
int security_bprm_creds_for_exec(struct linux_binprm *bprm)
852
{
853 854 855
	return call_int_hook(bprm_creds_for_exec, 0, bprm);
}

856
int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
857
{
858
	return call_int_hook(bprm_creds_from_file, 0, bprm, file);
859 860
}

861
int security_bprm_check(struct linux_binprm *bprm)
862
{
863 864
	int ret;

865
	ret = call_int_hook(bprm_check_security, 0, bprm);
866 867 868
	if (ret)
		return ret;
	return ima_bprm_check(bprm);
869 870
}

871
void security_bprm_committing_creds(struct linux_binprm *bprm)
872
{
873
	call_void_hook(bprm_committing_creds, bprm);
874 875
}

876
void security_bprm_committed_creds(struct linux_binprm *bprm)
877
{
878
	call_void_hook(bprm_committed_creds, bprm);
879 880
}

A
Al Viro 已提交
881 882 883 884 885
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
{
	return call_int_hook(fs_context_dup, 0, fc, src_fc);
}

886 887 888 889 890
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param);
}

891 892
int security_sb_alloc(struct super_block *sb)
{
893 894 895 896 897 898 899 900
	int rc = lsm_superblock_alloc(sb);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(sb_alloc_security, 0, sb);
	if (unlikely(rc))
		security_sb_free(sb);
	return rc;
901 902
}

903 904 905
void security_sb_delete(struct super_block *sb)
{
	call_void_hook(sb_delete, sb);
906 907 908 909
}

void security_sb_free(struct super_block *sb)
{
910
	call_void_hook(sb_free_security, sb);
911 912
	kfree(sb->s_security);
	sb->s_security = NULL;
913 914
}

915
void security_free_mnt_opts(void **mnt_opts)
916
{
917 918 919 920
	if (!*mnt_opts)
		return;
	call_void_hook(sb_free_mnt_opts, *mnt_opts);
	*mnt_opts = NULL;
921
}
922
EXPORT_SYMBOL(security_free_mnt_opts);
923

924
int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
925
{
926
	return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
927
}
A
Al Viro 已提交
928
EXPORT_SYMBOL(security_sb_eat_lsm_opts);
929

930 931 932 933 934 935 936
int security_sb_mnt_opts_compat(struct super_block *sb,
				void *mnt_opts)
{
	return call_int_hook(sb_mnt_opts_compat, 0, sb, mnt_opts);
}
EXPORT_SYMBOL(security_sb_mnt_opts_compat);

937
int security_sb_remount(struct super_block *sb,
938
			void *mnt_opts)
939
{
940
	return call_int_hook(sb_remount, 0, sb, mnt_opts);
941
}
A
Al Viro 已提交
942
EXPORT_SYMBOL(security_sb_remount);
943

944
int security_sb_kern_mount(struct super_block *sb)
945
{
946
	return call_int_hook(sb_kern_mount, 0, sb);
947 948
}

949 950
int security_sb_show_options(struct seq_file *m, struct super_block *sb)
{
951
	return call_int_hook(sb_show_options, 0, m, sb);
952 953
}

954 955
int security_sb_statfs(struct dentry *dentry)
{
956
	return call_int_hook(sb_statfs, 0, dentry);
957 958
}

A
Al Viro 已提交
959
int security_sb_mount(const char *dev_name, const struct path *path,
A
Al Viro 已提交
960
                       const char *type, unsigned long flags, void *data)
961
{
962
	return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
963 964 965 966
}

int security_sb_umount(struct vfsmount *mnt, int flags)
{
967
	return call_int_hook(sb_umount, 0, mnt, flags);
968 969
}

A
Al Viro 已提交
970
int security_sb_pivotroot(const struct path *old_path, const struct path *new_path)
971
{
972
	return call_int_hook(sb_pivotroot, 0, old_path, new_path);
973 974
}

975
int security_sb_set_mnt_opts(struct super_block *sb,
976
				void *mnt_opts,
977 978
				unsigned long kern_flags,
				unsigned long *set_kern_flags)
979
{
C
Casey Schaufler 已提交
980
	return call_int_hook(sb_set_mnt_opts,
981 982
				mnt_opts ? -EOPNOTSUPP : 0, sb,
				mnt_opts, kern_flags, set_kern_flags);
983
}
984
EXPORT_SYMBOL(security_sb_set_mnt_opts);
985

986
int security_sb_clone_mnt_opts(const struct super_block *oldsb,
987 988 989
				struct super_block *newsb,
				unsigned long kern_flags,
				unsigned long *set_kern_flags)
990
{
991 992
	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
				kern_flags, set_kern_flags);
993
}
994 995
EXPORT_SYMBOL(security_sb_clone_mnt_opts);

A
Al Viro 已提交
996 997
int security_add_mnt_opt(const char *option, const char *val, int len,
			 void **mnt_opts)
998
{
A
Al Viro 已提交
999 1000
	return call_int_hook(sb_add_mnt_opt, -EINVAL,
					option, val, len, mnt_opts);
1001
}
A
Al Viro 已提交
1002
EXPORT_SYMBOL(security_add_mnt_opt);
1003

1004 1005 1006 1007 1008
int security_move_mount(const struct path *from_path, const struct path *to_path)
{
	return call_int_hook(move_mount, 0, from_path, to_path);
}

1009 1010 1011 1012 1013 1014
int security_path_notify(const struct path *path, u64 mask,
				unsigned int obj_type)
{
	return call_int_hook(path_notify, 0, path, mask, obj_type);
}

1015 1016
int security_inode_alloc(struct inode *inode)
{
1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032
	int rc = lsm_inode_alloc(inode);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(inode_alloc_security, 0, inode);
	if (unlikely(rc))
		security_inode_free(inode);
	return rc;
}

static void inode_free_by_rcu(struct rcu_head *head)
{
	/*
	 * The rcu head is at the start of the inode blob
	 */
	kmem_cache_free(lsm_inode_cache, head);
1033 1034 1035 1036
}

void security_inode_free(struct inode *inode)
{
1037
	integrity_inode_free(inode);
1038
	call_void_hook(inode_free_security, inode);
1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
	/*
	 * The inode may still be referenced in a path walk and
	 * a call to security_inode_permission() can be made
	 * after inode_free_security() is called. Ideally, the VFS
	 * wouldn't do this, but fixing that is a much harder
	 * job. For now, simply free the i_security via RCU, and
	 * leave the current inode->i_security pointer intact.
	 * The inode will be freed after the RCU grace period too.
	 */
	if (inode->i_security)
		call_rcu((struct rcu_head *)inode->i_security,
				inode_free_by_rcu);
1051 1052
}

1053
int security_dentry_init_security(struct dentry *dentry, int mode,
A
Al Viro 已提交
1054
					const struct qstr *name, void **ctx,
1055 1056
					u32 *ctxlen)
{
C
Casey Schaufler 已提交
1057 1058
	return call_int_hook(dentry_init_security, -EOPNOTSUPP, dentry, mode,
				name, ctx, ctxlen);
1059 1060 1061
}
EXPORT_SYMBOL(security_dentry_init_security);

1062 1063 1064 1065 1066 1067 1068 1069 1070
int security_dentry_create_files_as(struct dentry *dentry, int mode,
				    struct qstr *name,
				    const struct cred *old, struct cred *new)
{
	return call_int_hook(dentry_create_files_as, 0, dentry, mode,
				name, old, new);
}
EXPORT_SYMBOL(security_dentry_create_files_as);

1071
int security_inode_init_security(struct inode *inode, struct inode *dir,
1072 1073
				 const struct qstr *qstr,
				 const initxattrs initxattrs, void *fs_data)
1074
{
1075 1076
	struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
	struct xattr *lsm_xattr, *evm_xattr, *xattr;
1077 1078
	int ret;

1079
	if (unlikely(IS_PRIVATE(inode)))
1080
		return 0;
1081 1082

	if (!initxattrs)
1083 1084
		return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
				     dir, qstr, NULL, NULL, NULL);
1085
	memset(new_xattrs, 0, sizeof(new_xattrs));
1086
	lsm_xattr = new_xattrs;
C
Casey Schaufler 已提交
1087
	ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
1088 1089 1090 1091 1092
						&lsm_xattr->name,
						&lsm_xattr->value,
						&lsm_xattr->value_len);
	if (ret)
		goto out;
1093 1094 1095 1096 1097

	evm_xattr = lsm_xattr + 1;
	ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
	if (ret)
		goto out;
1098 1099
	ret = initxattrs(inode, new_xattrs, fs_data);
out:
1100
	for (xattr = new_xattrs; xattr->value != NULL; xattr++)
1101
		kfree(xattr->value);
1102 1103 1104 1105
	return (ret == -EOPNOTSUPP) ? 0 : ret;
}
EXPORT_SYMBOL(security_inode_init_security);
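/*
 * Caller sketch (illustrative, names hypothetical): a filesystem normally
 * invokes this from its inode-creation path, passing a callback that writes
 * the returned xattrs to disk:
 *
 *	err = security_inode_init_security(inode, dir, qstr,
 *					   &example_initxattrs, fs_info);
 *
 * where example_initxattrs() walks the xattr array and stores each entry.
 */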

1106 1107 1108 1109 1110 1111 1112 1113
int security_inode_init_security_anon(struct inode *inode,
				      const struct qstr *name,
				      const struct inode *context_inode)
{
	return call_int_hook(inode_init_security_anon, 0, inode, name,
			     context_inode);
}

1114
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
1115
				     const struct qstr *qstr, const char **name,
1116
				     void **value, size_t *len)
1117 1118
{
	if (unlikely(IS_PRIVATE(inode)))
1119
		return -EOPNOTSUPP;
1120 1121
	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
			     qstr, name, value, len);
1122
}
1123
EXPORT_SYMBOL(security_old_inode_init_security);
1124

1125
#ifdef CONFIG_SECURITY_PATH
1126
int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
1127 1128
			unsigned int dev)
{
1129
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1130
		return 0;
1131
	return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
1132 1133 1134
}
EXPORT_SYMBOL(security_path_mknod);

1135
int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode)
1136
{
1137
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1138
		return 0;
1139
	return call_int_hook(path_mkdir, 0, dir, dentry, mode);
1140
}
1141
EXPORT_SYMBOL(security_path_mkdir);
1142

A
Al Viro 已提交
1143
int security_path_rmdir(const struct path *dir, struct dentry *dentry)
1144
{
1145
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1146
		return 0;
1147
	return call_int_hook(path_rmdir, 0, dir, dentry);
1148 1149
}

A
Al Viro 已提交
1150
int security_path_unlink(const struct path *dir, struct dentry *dentry)
1151
{
1152
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1153
		return 0;
1154
	return call_int_hook(path_unlink, 0, dir, dentry);
1155
}
1156
EXPORT_SYMBOL(security_path_unlink);
1157

1158
int security_path_symlink(const struct path *dir, struct dentry *dentry,
1159 1160
			  const char *old_name)
{
1161
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1162
		return 0;
1163
	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
1164 1165
}

A
Al Viro 已提交
1166
int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
1167 1168
		       struct dentry *new_dentry)
{
1169
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1170
		return 0;
1171
	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
1172 1173
}

A
Al Viro 已提交
1174 1175
int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
			 const struct path *new_dir, struct dentry *new_dentry,
1176
			 unsigned int flags)
1177
{
1178 1179
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
		     (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1180
		return 0;
M
Miklos Szeredi 已提交
1181 1182

	if (flags & RENAME_EXCHANGE) {
1183 1184
		int err = call_int_hook(path_rename, 0, new_dir, new_dentry,
					old_dir, old_dentry);
M
Miklos Szeredi 已提交
1185 1186 1187 1188
		if (err)
			return err;
	}

1189 1190
	return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
				new_dentry);
1191
}
1192
EXPORT_SYMBOL(security_path_rename);
1193

A
Al Viro 已提交
1194
int security_path_truncate(const struct path *path)
1195
{
1196
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1197
		return 0;
1198
	return call_int_hook(path_truncate, 0, path);
1199
}
1200

1201
int security_path_chmod(const struct path *path, umode_t mode)
1202
{
1203
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1204
		return 0;
1205
	return call_int_hook(path_chmod, 0, path, mode);
1206 1207
}

1208
int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
1209
{
1210
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1211
		return 0;
1212
	return call_int_hook(path_chown, 0, path, uid, gid);
1213
}
T
Tetsuo Handa 已提交
1214

A
Al Viro 已提交
1215
int security_path_chroot(const struct path *path)
T
Tetsuo Handa 已提交
1216
{
1217
	return call_int_hook(path_chroot, 0, path);
T
Tetsuo Handa 已提交
1218
}
1219 1220
#endif

A
Al Viro 已提交
1221
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
1222 1223 1224
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1225
	return call_int_hook(inode_create, 0, dir, dentry, mode);
1226
}
1227
EXPORT_SYMBOL_GPL(security_inode_create);
1228 1229 1230 1231

int security_inode_link(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry)
{
1232
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1233
		return 0;
1234
	return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
1235 1236 1237 1238
}

int security_inode_unlink(struct inode *dir, struct dentry *dentry)
{
1239
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1240
		return 0;
1241
	return call_int_hook(inode_unlink, 0, dir, dentry);
1242 1243 1244 1245 1246 1247 1248
}

int security_inode_symlink(struct inode *dir, struct dentry *dentry,
			    const char *old_name)
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1249
	return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
1250 1251
}

1252
int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1253 1254 1255
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1256
	return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
1257
}
1258
EXPORT_SYMBOL_GPL(security_inode_mkdir);
1259 1260 1261

int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
1262
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1263
		return 0;
1264
	return call_int_hook(inode_rmdir, 0, dir, dentry);
1265 1266
}

A
Al Viro 已提交
1267
int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1268 1269 1270
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1271
	return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
1272 1273 1274
}

int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
1275 1276
			   struct inode *new_dir, struct dentry *new_dentry,
			   unsigned int flags)
1277
{
1278 1279
        if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
            (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1280
		return 0;
M
Miklos Szeredi 已提交
1281 1282

	if (flags & RENAME_EXCHANGE) {
1283
		int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
M
Miklos Szeredi 已提交
1284 1285 1286 1287 1288
						     old_dir, old_dentry);
		if (err)
			return err;
	}

1289
	return call_int_hook(inode_rename, 0, old_dir, old_dentry,
1290 1291 1292 1293 1294
					   new_dir, new_dentry);
}

int security_inode_readlink(struct dentry *dentry)
{
1295
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1296
		return 0;
1297
	return call_int_hook(inode_readlink, 0, dentry);
1298 1299
}

1300 1301
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
			       bool rcu)
1302
{
1303
	if (unlikely(IS_PRIVATE(inode)))
1304
		return 0;
1305
	return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
1306 1307
}

1308
int security_inode_permission(struct inode *inode, int mask)
1309 1310 1311
{
	if (unlikely(IS_PRIVATE(inode)))
		return 0;
1312
	return call_int_hook(inode_permission, 0, inode, mask);
1313 1314 1315 1316
}

int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
1317 1318
	int ret;

1319
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1320
		return 0;
1321
	ret = call_int_hook(inode_setattr, 0, dentry, attr);
1322 1323 1324
	if (ret)
		return ret;
	return evm_inode_setattr(dentry, attr);
1325
}
1326
EXPORT_SYMBOL_GPL(security_inode_setattr);
1327

1328
int security_inode_getattr(const struct path *path)
1329
{
1330
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1331
		return 0;
1332
	return call_int_hook(inode_getattr, 0, path);
1333 1334
}

1335 1336
int security_inode_setxattr(struct user_namespace *mnt_userns,
			    struct dentry *dentry, const char *name,
1337
			    const void *value, size_t size, int flags)
1338
{
1339 1340
	int ret;

1341
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1342
		return 0;
C
Casey Schaufler 已提交
1343 1344 1345 1346
	/*
	 * SELinux and Smack integrate the cap call,
	 * so assume that all LSMs supplying this call do so.
	 */
1347 1348
	ret = call_int_hook(inode_setxattr, 1, mnt_userns, dentry, name, value,
			    size, flags);
C
Casey Schaufler 已提交
1349 1350 1351

	if (ret == 1)
		ret = cap_inode_setxattr(dentry, name, value, size, flags);
1352 1353 1354
	if (ret)
		return ret;
	ret = ima_inode_setxattr(dentry, name, value, size);
1355 1356
	if (ret)
		return ret;
1357
	return evm_inode_setxattr(mnt_userns, dentry, name, value, size);
1358 1359
}

1360 1361
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
				  const void *value, size_t size, int flags)
1362
{
1363
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1364
		return;
1365
	call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
1366
	evm_inode_post_setxattr(dentry, name, value, size);
1367 1368
}

1369
int security_inode_getxattr(struct dentry *dentry, const char *name)
1370
{
1371
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1372
		return 0;
1373
	return call_int_hook(inode_getxattr, 0, dentry, name);
1374 1375 1376 1377
}

int security_inode_listxattr(struct dentry *dentry)
{
1378
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1379
		return 0;
1380
	return call_int_hook(inode_listxattr, 0, dentry);
1381 1382
}

1383 1384
int security_inode_removexattr(struct user_namespace *mnt_userns,
			       struct dentry *dentry, const char *name)
1385
{
1386 1387
	int ret;

1388
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1389
		return 0;
C
Casey Schaufler 已提交
1390 1391 1392 1393
	/*
	 * SELinux and Smack integrate the cap call,
	 * so assume that all LSMs supplying this call do so.
	 */
1394
	ret = call_int_hook(inode_removexattr, 1, mnt_userns, dentry, name);
C
Casey Schaufler 已提交
1395
	if (ret == 1)
1396
		ret = cap_inode_removexattr(mnt_userns, dentry, name);
1397 1398 1399
	if (ret)
		return ret;
	ret = ima_inode_removexattr(dentry, name);
1400 1401
	if (ret)
		return ret;
1402
	return evm_inode_removexattr(mnt_userns, dentry, name);
1403 1404
}

1405 1406
int security_inode_need_killpriv(struct dentry *dentry)
{
1407
	return call_int_hook(inode_need_killpriv, 0, dentry);
1408 1409
}

1410 1411
int security_inode_killpriv(struct user_namespace *mnt_userns,
			    struct dentry *dentry)
1412
{
1413
	return call_int_hook(inode_killpriv, 0, mnt_userns, dentry);
1414 1415
}

1416 1417 1418
int security_inode_getsecurity(struct user_namespace *mnt_userns,
			       struct inode *inode, const char *name,
			       void **buffer, bool alloc)
1419
{
1420 1421 1422
	struct security_hook_list *hp;
	int rc;

1423
	if (unlikely(IS_PRIVATE(inode)))
1424
		return LSM_RET_DEFAULT(inode_getsecurity);
1425 1426 1427
	/*
	 * Only one module will provide an attribute with a given name.
	 */
1428
	hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
1429
		rc = hp->hook.inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
1430
		if (rc != LSM_RET_DEFAULT(inode_getsecurity))
1431 1432
			return rc;
	}
1433
	return LSM_RET_DEFAULT(inode_getsecurity);
1434 1435 1436 1437
}

int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
1438 1439 1440
	struct security_hook_list *hp;
	int rc;

1441
	if (unlikely(IS_PRIVATE(inode)))
1442
		return LSM_RET_DEFAULT(inode_setsecurity);
1443 1444 1445
	/*
	 * Only one module will provide an attribute with a given name.
	 */
1446
	hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
1447 1448
		rc = hp->hook.inode_setsecurity(inode, name, value, size,
								flags);
1449
		if (rc != LSM_RET_DEFAULT(inode_setsecurity))
1450 1451
			return rc;
	}
1452
	return LSM_RET_DEFAULT(inode_setsecurity);
1453 1454 1455 1456 1457 1458
}

int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
	if (unlikely(IS_PRIVATE(inode)))
		return 0;
1459
	return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
1460
}
1461
EXPORT_SYMBOL(security_inode_listsecurity);
1462

1463
void security_inode_getsecid(struct inode *inode, u32 *secid)
1464
{
1465
	call_void_hook(inode_getsecid, inode, secid);
1466 1467
}

1468 1469 1470 1471 1472 1473
int security_inode_copy_up(struct dentry *src, struct cred **new)
{
	return call_int_hook(inode_copy_up, 0, src, new);
}
EXPORT_SYMBOL(security_inode_copy_up);

1474 1475
int security_inode_copy_up_xattr(const char *name)
{
1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491
	struct security_hook_list *hp;
	int rc;

	/*
	 * The implementation can return 0 (accept the xattr), 1 (discard the
	 * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
 * any other error code in case of an error.
	 */
	hlist_for_each_entry(hp,
		&security_hook_heads.inode_copy_up_xattr, list) {
		rc = hp->hook.inode_copy_up_xattr(name);
		if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
			return rc;
	}

	return LSM_RET_DEFAULT(inode_copy_up_xattr);
1492 1493 1494
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);

1495 1496 1497 1498 1499 1500
int security_kernfs_init_security(struct kernfs_node *kn_dir,
				  struct kernfs_node *kn)
{
	return call_int_hook(kernfs_init_security, 0, kn_dir, kn);
}

1501 1502
int security_file_permission(struct file *file, int mask)
{
1503 1504
	int ret;

1505
	ret = call_int_hook(file_permission, 0, file, mask);
1506 1507 1508 1509
	if (ret)
		return ret;

	return fsnotify_perm(file, mask);
1510 1511 1512 1513
}

int security_file_alloc(struct file *file)
{
1514 1515 1516 1517 1518 1519 1520 1521
	int rc = lsm_file_alloc(file);

	if (rc)
		return rc;
	rc = call_int_hook(file_alloc_security, 0, file);
	if (unlikely(rc))
		security_file_free(file);
	return rc;
1522 1523 1524 1525
}

void security_file_free(struct file *file)
{
1526 1527
	void *blob;

1528
	call_void_hook(file_free_security, file);
1529 1530 1531 1532 1533 1534

	blob = file->f_security;
	if (blob) {
		file->f_security = NULL;
		kmem_cache_free(lsm_file_cache, blob);
	}
1535 1536 1537 1538
}

int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
1539
	return call_int_hook(file_ioctl, 0, file, cmd, arg);
1540
}
1541
EXPORT_SYMBOL_GPL(security_file_ioctl);
1542

1543
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
1544
{
	/*
	 * Do we have PROT_READ and does the application expect
	 * it to imply PROT_EXEC?  If not, nothing to talk about...
	 */
	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
		return prot;
1551
	if (!(current->personality & READ_IMPLIES_EXEC))
1552 1553 1554 1555 1556 1557 1558 1559
		return prot;
	/*
	 * if that's an anonymous mapping, let it.
	 */
	if (!file)
		return prot | PROT_EXEC;
	/*
	 * ditto if it's not on noexec mount, except that on !MMU we need
1560
	 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
1561
	 */
1562
	if (!path_noexec(&file->f_path)) {
1563
#ifndef CONFIG_MMU
1564 1565 1566 1567 1568
		if (file->f_op->mmap_capabilities) {
			unsigned caps = file->f_op->mmap_capabilities(file);
			if (!(caps & NOMMU_MAP_EXEC))
				return prot;
		}
1569
#endif
1570
		return prot | PROT_EXEC;
1571
	}
1572 1573 1574 1575 1576 1577 1578 1579
	/* anything on noexec mount won't get PROT_EXEC */
	return prot;
}

int security_mmap_file(struct file *file, unsigned long prot,
			unsigned long flags)
{
	int ret;
1580
	ret = call_int_hook(mmap_file, 0, file, prot,
1581
					mmap_prot(file, prot), flags);
1582 1583 1584
	if (ret)
		return ret;
	return ima_file_mmap(file, prot);
1585 1586
}

1587 1588
int security_mmap_addr(unsigned long addr)
{
1589
	return call_int_hook(mmap_addr, 0, addr);
1590 1591
}

1592 1593 1594
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
			    unsigned long prot)
{
1595 1596 1597 1598 1599 1600
	int ret;

	ret = call_int_hook(file_mprotect, 0, vma, reqprot, prot);
	if (ret)
		return ret;
	return ima_file_mprotect(vma, prot);
1601 1602 1603 1604
}

int security_file_lock(struct file *file, unsigned int cmd)
{
1605
	return call_int_hook(file_lock, 0, file, cmd);
1606 1607 1608 1609
}

int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
1610
	return call_int_hook(file_fcntl, 0, file, cmd, arg);
1611 1612
}

1613
void security_file_set_fowner(struct file *file)
1614
{
1615
	call_void_hook(file_set_fowner, file);
1616 1617 1618 1619 1620
}

int security_file_send_sigiotask(struct task_struct *tsk,
				  struct fown_struct *fown, int sig)
{
1621
	return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
1622 1623 1624 1625
}

int security_file_receive(struct file *file)
{
1626
	return call_int_hook(file_receive, 0, file);
1627 1628
}

1629
int security_file_open(struct file *file)
1630
{
1631 1632
	int ret;

A
Al Viro 已提交
1633
	ret = call_int_hook(file_open, 0, file);
1634 1635 1636 1637
	if (ret)
		return ret;

	return fsnotify_perm(file, MAY_OPEN);
1638 1639
}

1640 1641
int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
1642 1643 1644 1645 1646 1647 1648 1649
	int rc = lsm_task_alloc(task);

	if (rc)
		return rc;
	rc = call_int_hook(task_alloc, 0, task, clone_flags);
	if (unlikely(rc))
		security_task_free(task);
	return rc;
1650 1651
}

1652 1653
void security_task_free(struct task_struct *task)
{
1654
	call_void_hook(task_free, task);
1655 1656 1657

	kfree(task->security);
	task->security = NULL;
1658 1659
}

1660 1661
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
1662 1663 1664 1665 1666 1667
	int rc = lsm_cred_alloc(cred, gfp);

	if (rc)
		return rc;

	rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
1668
	if (unlikely(rc))
1669 1670
		security_cred_free(cred);
	return rc;
1671 1672
}

D
David Howells 已提交
1673
void security_cred_free(struct cred *cred)
1674
{
1675 1676 1677 1678 1679 1680 1681
	/*
	 * There is a failure case in prepare_creds() that
	 * may result in a call here with ->security being NULL.
	 */
	if (unlikely(cred->security == NULL))
		return;

1682
	call_void_hook(cred_free, cred);
1683 1684 1685

	kfree(cred->security);
	cred->security = NULL;
1686 1687
}

D
David Howells 已提交
1688
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
1689
{
1690 1691 1692 1693 1694 1695
	int rc = lsm_cred_alloc(new, gfp);

	if (rc)
		return rc;

	rc = call_int_hook(cred_prepare, 0, new, old, gfp);
1696
	if (unlikely(rc))
1697 1698
		security_cred_free(new);
	return rc;
D
David Howells 已提交
1699 1700
}

1701 1702
void security_transfer_creds(struct cred *new, const struct cred *old)
{
1703
	call_void_hook(cred_transfer, new, old);
1704 1705
}

1706 1707 1708 1709 1710 1711 1712
void security_cred_getsecid(const struct cred *c, u32 *secid)
{
	*secid = 0;
	call_void_hook(cred_getsecid, c, secid);
}
EXPORT_SYMBOL(security_cred_getsecid);

1713 1714
int security_kernel_act_as(struct cred *new, u32 secid)
{
1715
	return call_int_hook(kernel_act_as, 0, new, secid);
1716 1717 1718 1719
}

int security_kernel_create_files_as(struct cred *new, struct inode *inode)
{
1720
	return call_int_hook(kernel_create_files_as, 0, new, inode);
1721 1722
}

1723
int security_kernel_module_request(char *kmod_name)
1724
{
1725 1726 1727 1728 1729 1730
	int ret;

	ret = call_int_hook(kernel_module_request, 0, kmod_name);
	if (ret)
		return ret;
	return integrity_kernel_module_request(kmod_name);
1731 1732
}

1733 1734
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
			      bool contents)
1735 1736 1737
{
	int ret;

1738
	ret = call_int_hook(kernel_read_file, 0, file, id, contents);
1739 1740
	if (ret)
		return ret;
1741
	return ima_read_file(file, id, contents);
1742 1743 1744
}
EXPORT_SYMBOL_GPL(security_kernel_read_file);

1745 1746
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
				   enum kernel_read_file_id id)
1747
{
1748 1749 1750 1751 1752 1753
	int ret;

	ret = call_int_hook(kernel_post_read_file, 0, file, buf, size, id);
	if (ret)
		return ret;
	return ima_post_read_file(file, buf, size, id);
1754 1755 1756
}
EXPORT_SYMBOL_GPL(security_kernel_post_read_file);

1757
int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
1758
{
1759 1760
	int ret;

1761
	ret = call_int_hook(kernel_load_data, 0, id, contents);
1762 1763
	if (ret)
		return ret;
1764
	return ima_load_data(id, contents);
1765
}
1766
EXPORT_SYMBOL_GPL(security_kernel_load_data);
1767

1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781
int security_kernel_post_load_data(char *buf, loff_t size,
				   enum kernel_load_data_id id,
				   char *description)
{
	int ret;

	ret = call_int_hook(kernel_post_load_data, 0, buf, size, id,
			    description);
	if (ret)
		return ret;
	return ima_post_load_data(buf, size, id, description);
}
EXPORT_SYMBOL_GPL(security_kernel_post_load_data);

D
David Howells 已提交
1782 1783
int security_task_fix_setuid(struct cred *new, const struct cred *old,
			     int flags)
1784
{
1785
	return call_int_hook(task_fix_setuid, 0, new, old, flags);
1786 1787
}

1788 1789 1790 1791 1792 1793
int security_task_fix_setgid(struct cred *new, const struct cred *old,
				 int flags)
{
	return call_int_hook(task_fix_setgid, 0, new, old, flags);
}

1794 1795
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
1796
	return call_int_hook(task_setpgid, 0, p, pgid);
1797 1798 1799 1800
}

int security_task_getpgid(struct task_struct *p)
{
1801
	return call_int_hook(task_getpgid, 0, p);
1802 1803 1804 1805
}

int security_task_getsid(struct task_struct *p)
{
1806
	return call_int_hook(task_getsid, 0, p);
1807 1808
}

1809
void security_task_getsecid_subj(struct task_struct *p, u32 *secid)
1810
{
C
Casey Schaufler 已提交
1811
	*secid = 0;
1812
	call_void_hook(task_getsecid_subj, p, secid);
1813
}
1814 1815 1816 1817 1818 1819 1820 1821
EXPORT_SYMBOL(security_task_getsecid_subj);

void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
	*secid = 0;
	call_void_hook(task_getsecid_obj, p, secid);
}
EXPORT_SYMBOL(security_task_getsecid_obj);
1822 1823 1824

int security_task_setnice(struct task_struct *p, int nice)
{
1825
	return call_int_hook(task_setnice, 0, p, nice);
1826 1827 1828 1829
}

int security_task_setioprio(struct task_struct *p, int ioprio)
{
1830
	return call_int_hook(task_setioprio, 0, p, ioprio);
1831 1832 1833 1834
}

int security_task_getioprio(struct task_struct *p)
{
1835
	return call_int_hook(task_getioprio, 0, p);
1836 1837
}

1838 1839 1840 1841 1842 1843
int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
			  unsigned int flags)
{
	return call_int_hook(task_prlimit, 0, cred, tcred, flags);
}

1844 1845
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
		struct rlimit *new_rlim)
1846
{
1847
	return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
1848 1849
}

1850
int security_task_setscheduler(struct task_struct *p)
{
	return call_int_hook(task_setscheduler, 0, p);
}

int security_task_getscheduler(struct task_struct *p)
{
	return call_int_hook(task_getscheduler, 0, p);
}

int security_task_movememory(struct task_struct *p)
{
	return call_int_hook(task_movememory, 0, p);
}

int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
			int sig, const struct cred *cred)
{
	return call_int_hook(task_kill, 0, p, info, sig, cred);
}

int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
			 unsigned long arg4, unsigned long arg5)
{
	int thisrc;
	int rc = LSM_RET_DEFAULT(task_prctl);
	struct security_hook_list *hp;

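	/*
	 * The hook list is walked by hand here: LSM_RET_DEFAULT(task_prctl)
	 * means "no opinion", the first LSM expressing an opinion sets the
	 * return value, and any non-zero result ends the walk early.
	 */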
	hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
		thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
		if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
			rc = thisrc;
			if (thisrc != 0)
				break;
		}
	}
	return rc;
}

void security_task_to_inode(struct task_struct *p, struct inode *inode)
{
	call_void_hook(task_to_inode, p, inode);
}

int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
{
	return call_int_hook(ipc_permission, 0, ipcp, flag);
}

void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
	*secid = 0;
	call_void_hook(ipc_getsecid, ipcp, secid);
}

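/*
 * Allocate the shared LSM blob for @msg first, then let each LSM
 * initialise its part; on failure the blob is torn down again via
 * security_msg_msg_free().
 */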
int security_msg_msg_alloc(struct msg_msg *msg)
{
	int rc = lsm_msg_msg_alloc(msg);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(msg_msg_alloc_security, 0, msg);
	if (unlikely(rc))
		security_msg_msg_free(msg);
	return rc;
}

void security_msg_msg_free(struct msg_msg *msg)
{
	call_void_hook(msg_msg_free_security, msg);
	kfree(msg->security);
	msg->security = NULL;
}

int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
	int rc = lsm_ipc_alloc(msq);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(msg_queue_alloc_security, 0, msq);
	if (unlikely(rc))
		security_msg_queue_free(msq);
	return rc;
}

void security_msg_queue_free(struct kern_ipc_perm *msq)
{
	call_void_hook(msg_queue_free_security, msq);
	kfree(msq->security);
	msq->security = NULL;
}

int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
{
	return call_int_hook(msg_queue_associate, 0, msq, msqflg);
}

int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
	return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
}

int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
			       struct msg_msg *msg, int msqflg)
{
	return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
}

int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
			       struct task_struct *target, long type, int mode)
{
	return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
}

int security_shm_alloc(struct kern_ipc_perm *shp)
{
	int rc = lsm_ipc_alloc(shp);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(shm_alloc_security, 0, shp);
	if (unlikely(rc))
		security_shm_free(shp);
	return rc;
}

void security_shm_free(struct kern_ipc_perm *shp)
{
	call_void_hook(shm_free_security, shp);
	kfree(shp->security);
	shp->security = NULL;
}

int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
{
	return call_int_hook(shm_associate, 0, shp, shmflg);
}

int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
	return call_int_hook(shm_shmctl, 0, shp, cmd);
}

int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg)
{
	return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
}

int security_sem_alloc(struct kern_ipc_perm *sma)
{
	int rc = lsm_ipc_alloc(sma);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(sem_alloc_security, 0, sma);
	if (unlikely(rc))
		security_sem_free(sma);
	return rc;
}

void security_sem_free(struct kern_ipc_perm *sma)
{
	call_void_hook(sem_free_security, sma);
	kfree(sma->security);
	sma->security = NULL;
}

int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
	return call_int_hook(sem_associate, 0, sma, semflg);
}

int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
	return call_int_hook(sem_semctl, 0, sma, cmd);
}

int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
			unsigned nsops, int alter)
{
	return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
}

void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{
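	/* Skip filesystem-internal (private) inodes. */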
	if (unlikely(inode && IS_PRIVATE(inode)))
		return;
	call_void_hook(d_instantiate, dentry, inode);
}
EXPORT_SYMBOL(security_d_instantiate);

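/*
 * When @lsm is non-NULL only the matching LSM's hook is consulted;
 * otherwise the first registered getprocattr hook answers.
 */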
int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
				char **value)
{
	struct security_hook_list *hp;

	hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
		if (lsm != NULL && strcmp(lsm, hp->lsm))
			continue;
		return hp->hook.getprocattr(p, name, value);
	}
	return LSM_RET_DEFAULT(getprocattr);
}

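/* Same LSM-name based dispatch as security_getprocattr() above. */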
int security_setprocattr(const char *lsm, const char *name, void *value,
			 size_t size)
{
	struct security_hook_list *hp;

	hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
		if (lsm != NULL && strcmp(lsm, hp->lsm))
			continue;
		return hp->hook.setprocattr(name, value, size);
	}
	return LSM_RET_DEFAULT(setprocattr);
}

int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
	return call_int_hook(netlink_send, 0, sk, skb);
}

int security_ismaclabel(const char *name)
{
	return call_int_hook(ismaclabel, 0, name);
}
EXPORT_SYMBOL(security_ismaclabel);

int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	struct security_hook_list *hp;
	int rc;

	/*
	 * Currently, only one LSM can implement secid_to_secctx (i.e. this
	 * LSM hook is not "stackable").
	 */
	hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) {
		rc = hp->hook.secid_to_secctx(secid, secdata, seclen);
		if (rc != LSM_RET_DEFAULT(secid_to_secctx))
			return rc;
	}

	return LSM_RET_DEFAULT(secid_to_secctx);
}
EXPORT_SYMBOL(security_secid_to_secctx);

int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	*secid = 0;
	return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
}
EXPORT_SYMBOL(security_secctx_to_secid);

void security_release_secctx(char *secdata, u32 seclen)
{
	call_void_hook(release_secctx, secdata, seclen);
}
EXPORT_SYMBOL(security_release_secctx);

void security_inode_invalidate_secctx(struct inode *inode)
{
	call_void_hook(inode_invalidate_secctx, inode);
}
EXPORT_SYMBOL(security_inode_invalidate_secctx);

int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
	return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_notifysecctx);

int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
	return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_setsecctx);

int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_getsecctx);

#ifdef CONFIG_WATCH_QUEUE
int security_post_notification(const struct cred *w_cred,
			       const struct cred *cred,
			       struct watch_notification *n)
{
	return call_int_hook(post_notification, 0, w_cred, cred, n);
}
#endif /* CONFIG_WATCH_QUEUE */

#ifdef CONFIG_KEY_NOTIFICATIONS
int security_watch_key(struct key *key)
{
	return call_int_hook(watch_key, 0, key);
}
#endif

#ifdef CONFIG_SECURITY_NETWORK

int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
{
	return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
}
EXPORT_SYMBOL(security_unix_stream_connect);

int security_unix_may_send(struct socket *sock, struct socket *other)
{
	return call_int_hook(unix_may_send, 0, sock, other);
}
EXPORT_SYMBOL(security_unix_may_send);

int security_socket_create(int family, int type, int protocol, int kern)
{
	return call_int_hook(socket_create, 0, family, type, protocol, kern);
}

int security_socket_post_create(struct socket *sock, int family,
				int type, int protocol, int kern)
{
	return call_int_hook(socket_post_create, 0, sock, family, type,
						protocol, kern);
}

int security_socket_socketpair(struct socket *socka, struct socket *sockb)
{
	return call_int_hook(socket_socketpair, 0, socka, sockb);
}
EXPORT_SYMBOL(security_socket_socketpair);

int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
{
	return call_int_hook(socket_bind, 0, sock, address, addrlen);
}

int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
{
	return call_int_hook(socket_connect, 0, sock, address, addrlen);
}

int security_socket_listen(struct socket *sock, int backlog)
{
	return call_int_hook(socket_listen, 0, sock, backlog);
}

int security_socket_accept(struct socket *sock, struct socket *newsock)
{
	return call_int_hook(socket_accept, 0, sock, newsock);
}

int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
{
	return call_int_hook(socket_sendmsg, 0, sock, msg, size);
}

int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
			    int size, int flags)
{
	return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
}

int security_socket_getsockname(struct socket *sock)
{
	return call_int_hook(socket_getsockname, 0, sock);
}

int security_socket_getpeername(struct socket *sock)
{
	return call_int_hook(socket_getpeername, 0, sock);
}

int security_socket_getsockopt(struct socket *sock, int level, int optname)
{
	return call_int_hook(socket_getsockopt, 0, sock, level, optname);
}

int security_socket_setsockopt(struct socket *sock, int level, int optname)
{
	return call_int_hook(socket_setsockopt, 0, sock, level, optname);
}

int security_socket_shutdown(struct socket *sock, int how)
{
	return call_int_hook(socket_shutdown, 0, sock, how);
}

int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
}
EXPORT_SYMBOL(security_sock_rcv_skb);

int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
				      int __user *optlen, unsigned len)
{
	return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
				optval, optlen, len);
}

int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
			     skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);

int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
	return call_int_hook(sk_alloc_security, 0, sk, family, priority);
}

void security_sk_free(struct sock *sk)
{
	call_void_hook(sk_free_security, sk);
}

void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
	call_void_hook(sk_clone_security, sk, newsk);
}
EXPORT_SYMBOL(security_sk_clone);

void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic)
{
	call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);

void security_req_classify_flow(const struct request_sock *req,
				struct flowi_common *flic)
{
	call_void_hook(req_classify_flow, req, flic);
}
EXPORT_SYMBOL(security_req_classify_flow);

void security_sock_graft(struct sock *sk, struct socket *parent)
{
	call_void_hook(sock_graft, sk, parent);
}
EXPORT_SYMBOL(security_sock_graft);

int security_inet_conn_request(const struct sock *sk,
			struct sk_buff *skb, struct request_sock *req)
{
	return call_int_hook(inet_conn_request, 0, sk, skb, req);
}
EXPORT_SYMBOL(security_inet_conn_request);

void security_inet_csk_clone(struct sock *newsk,
			const struct request_sock *req)
{
	call_void_hook(inet_csk_clone, newsk, req);
}

void security_inet_conn_established(struct sock *sk,
			struct sk_buff *skb)
{
	call_void_hook(inet_conn_established, sk, skb);
}
EXPORT_SYMBOL(security_inet_conn_established);

int security_secmark_relabel_packet(u32 secid)
{
	return call_int_hook(secmark_relabel_packet, 0, secid);
}
EXPORT_SYMBOL(security_secmark_relabel_packet);

void security_secmark_refcount_inc(void)
{
	call_void_hook(secmark_refcount_inc);
}
EXPORT_SYMBOL(security_secmark_refcount_inc);

void security_secmark_refcount_dec(void)
{
	call_void_hook(secmark_refcount_dec);
}
EXPORT_SYMBOL(security_secmark_refcount_dec);

int security_tun_dev_alloc_security(void **security)
{
	return call_int_hook(tun_dev_alloc_security, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_alloc_security);

void security_tun_dev_free_security(void *security)
{
	call_void_hook(tun_dev_free_security, security);
}
EXPORT_SYMBOL(security_tun_dev_free_security);

int security_tun_dev_create(void)
{
	return call_int_hook(tun_dev_create, 0);
}
EXPORT_SYMBOL(security_tun_dev_create);

int security_tun_dev_attach_queue(void *security)
{
	return call_int_hook(tun_dev_attach_queue, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_attach_queue);

int security_tun_dev_attach(struct sock *sk, void *security)
{
	return call_int_hook(tun_dev_attach, 0, sk, security);
}
EXPORT_SYMBOL(security_tun_dev_attach);

int security_tun_dev_open(void *security)
{
	return call_int_hook(tun_dev_open, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_open);

int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb)
{
	return call_int_hook(sctp_assoc_request, 0, ep, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_request);

int security_sctp_bind_connect(struct sock *sk, int optname,
			       struct sockaddr *address, int addrlen)
{
	return call_int_hook(sctp_bind_connect, 0, sk, optname,
			     address, addrlen);
}
EXPORT_SYMBOL(security_sctp_bind_connect);

void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
			    struct sock *newsk)
{
	call_void_hook(sctp_sk_clone, ep, sk, newsk);
}
EXPORT_SYMBOL(security_sctp_sk_clone);

#endif	/* CONFIG_SECURITY_NETWORK */

#ifdef CONFIG_SECURITY_INFINIBAND

int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
	return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(security_ib_pkey_access);

int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
{
	return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num);
}
EXPORT_SYMBOL(security_ib_endport_manage_subnet);

int security_ib_alloc_security(void **sec)
{
	return call_int_hook(ib_alloc_security, 0, sec);
}
EXPORT_SYMBOL(security_ib_alloc_security);

void security_ib_free_security(void *sec)
{
	call_void_hook(ib_free_security, sec);
}
EXPORT_SYMBOL(security_ib_free_security);
#endif	/* CONFIG_SECURITY_INFINIBAND */

#ifdef CONFIG_SECURITY_NETWORK_XFRM

int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
			       struct xfrm_user_sec_ctx *sec_ctx,
			       gfp_t gfp)
{
	return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);

int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
			      struct xfrm_sec_ctx **new_ctxp)
{
	return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
}

void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
	call_void_hook(xfrm_policy_free_security, ctx);
}
EXPORT_SYMBOL(security_xfrm_policy_free);

int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
	return call_int_hook(xfrm_policy_delete_security, 0, ctx);
}

int security_xfrm_state_alloc(struct xfrm_state *x,
			      struct xfrm_user_sec_ctx *sec_ctx)
{
	return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);

int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
				      struct xfrm_sec_ctx *polsec, u32 secid)
{
	return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
}

int security_xfrm_state_delete(struct xfrm_state *x)
{
	return call_int_hook(xfrm_state_delete_security, 0, x);
}
EXPORT_SYMBOL(security_xfrm_state_delete);

void security_xfrm_state_free(struct xfrm_state *x)
{
	call_void_hook(xfrm_state_free_security, x);
}

int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
	return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid);
}

int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
				       struct xfrm_policy *xp,
				       const struct flowi_common *flic)
{
	struct security_hook_list *hp;
	int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);

	/*
	 * Since this function is expected to return 0 or 1, the judgment
	 * becomes difficult if multiple LSMs supply this call. Fortunately,
	 * we can use the first LSM's judgment because currently only SELinux
	 * supplies this call.
	 *
	 * As an optimization we break out of the loop explicitly rather
	 * than using the call_int_hook() macro.
	 */
	hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
				list) {
		rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
		break;
	}
	return rc;
}

int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
	return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
}

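/*
 * The decode hook is called with its final flag cleared here (compare
 * security_xfrm_decode_session() above); a failure is not expected,
 * hence the BUG_ON().
 */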
void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
{
	int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid,
				0);

	BUG_ON(rc);
}
EXPORT_SYMBOL(security_skb_classify_flow);

#endif	/* CONFIG_SECURITY_NETWORK_XFRM */

#ifdef CONFIG_KEYS

int security_key_alloc(struct key *key, const struct cred *cred,
		       unsigned long flags)
{
	return call_int_hook(key_alloc, 0, key, cred, flags);
}

void security_key_free(struct key *key)
{
	call_void_hook(key_free, key);
}

int security_key_permission(key_ref_t key_ref, const struct cred *cred,
			    enum key_need_perm need_perm)
{
	return call_int_hook(key_permission, 0, key_ref, cred, need_perm);
}

int security_key_getsecurity(struct key *key, char **_buffer)
{
	*_buffer = NULL;
	return call_int_hook(key_getsecurity, 0, key, _buffer);
}

#endif	/* CONFIG_KEYS */

#ifdef CONFIG_AUDIT

int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
{
	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
}

int security_audit_rule_known(struct audit_krule *krule)
{
	return call_int_hook(audit_rule_known, 0, krule);
}

void security_audit_rule_free(void *lsmrule)
{
	call_void_hook(audit_rule_free, lsmrule);
}

int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
{
	return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule);
}
#endif /* CONFIG_AUDIT */

#ifdef CONFIG_BPF_SYSCALL
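/*
 * BPF hooks: the *_alloc/*_free pairs manage per-object security state
 * for maps and programs; the rest gate access to the bpf(2) syscall.
 */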
int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return call_int_hook(bpf, 0, cmd, attr, size);
}
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
	return call_int_hook(bpf_map, 0, map, fmode);
}
int security_bpf_prog(struct bpf_prog *prog)
{
	return call_int_hook(bpf_prog, 0, prog);
}
int security_bpf_map_alloc(struct bpf_map *map)
{
	return call_int_hook(bpf_map_alloc_security, 0, map);
}
int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
	return call_int_hook(bpf_prog_alloc_security, 0, aux);
}
void security_bpf_map_free(struct bpf_map *map)
{
	call_void_hook(bpf_map_free_security, map);
}
void security_bpf_prog_free(struct bpf_prog_aux *aux)
{
	call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */

int security_locked_down(enum lockdown_reason what)
{
	return call_int_hook(locked_down, 0, what);
}
EXPORT_SYMBOL(security_locked_down);

#ifdef CONFIG_PERF_EVENTS
int security_perf_event_open(struct perf_event_attr *attr, int type)
{
	return call_int_hook(perf_event_open, 0, attr, type);
}

int security_perf_event_alloc(struct perf_event *event)
{
	return call_int_hook(perf_event_alloc, 0, event);
}

void security_perf_event_free(struct perf_event *event)
{
	call_void_hook(perf_event_free, event);
}

int security_perf_event_read(struct perf_event *event)
{
	return call_int_hook(perf_event_read, 0, event);
}

int security_perf_event_write(struct perf_event *event)
{
	return call_int_hook(perf_event_write, 0, event);
}
#endif /* CONFIG_PERF_EVENTS */