// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Security plug functions
 *
 * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
 * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
 * Copyright (C) 2016 Mellanox Technologies
 */

#define pr_fmt(fmt) "LSM: " fmt

#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/lsm_hooks.h>
#include <linux/integrity.h>
#include <linux/ima.h>
#include <linux/evm.h>
#include <linux/fsnotify.h>
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/msg.h>
#include <net/flow.h>

#define MAX_LSM_EVM_XATTR	2

/* How many LSMs were built into the kernel? */
#define LSM_COUNT (__end_lsm_info - __start_lsm_info)

/*
 * These are descriptions of the reasons that can be passed to the
 * security_locked_down() LSM hook. Placing this array here allows
 * all security modules to use the same descriptions for auditing
 * purposes.
 */
const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
	[LOCKDOWN_NONE] = "none",
	[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
	[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
	[LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
	[LOCKDOWN_KEXEC] = "kexec of unsigned images",
	[LOCKDOWN_HIBERNATION] = "hibernation",
	[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
	[LOCKDOWN_IOPORT] = "raw io port access",
	[LOCKDOWN_MSR] = "raw MSR access",
	[LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
	[LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
	[LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
	[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
	[LOCKDOWN_DEBUGFS] = "debugfs access",
	[LOCKDOWN_XMON_WR] = "xmon write access",
	[LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM",
	[LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
	[LOCKDOWN_KCORE] = "/proc/kcore access",
	[LOCKDOWN_KPROBES] = "use of kprobes",
	[LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM",
	[LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
	[LOCKDOWN_PERF] = "unsafe use of perf",
	[LOCKDOWN_TRACEFS] = "use of tracefs",
	[LOCKDOWN_XMON_RW] = "xmon read and write access",
	[LOCKDOWN_XFRM_SECRET] = "xfrm SA secret",
	[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
};
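
/*
 * Illustrative use (not a definition from this file): an LSM implementing
 * the locked_down hook would typically report a denial with something like
 *
 *	pr_notice("Lockdown: %s: %s is restricted\n",
 *		  current->comm, lockdown_reasons[what]);
 *
 * so that every module audits the same reason strings.
 */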

struct security_hook_heads security_hook_heads __lsm_ro_after_init;
static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);

static struct kmem_cache *lsm_file_cache;
static struct kmem_cache *lsm_inode_cache;

char *lsm_names;
static struct lsm_blob_sizes blob_sizes __lsm_ro_after_init;

/* Boot-time LSM user choice */
static __initdata const char *chosen_lsm_order;
static __initdata const char *chosen_major_lsm;

static __initconst const char * const builtin_lsm_order = CONFIG_LSM;

/* Ordered list of LSMs to initialize. */
static __initdata struct lsm_info **ordered_lsms;
static __initdata struct lsm_info *exclusive;

static __initdata bool debug;
#define init_debug(...)						\
	do {							\
		if (debug)					\
			pr_info(__VA_ARGS__);			\
	} while (0)
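
/*
 * Note: "debug" above is only set via the "lsm.debug" boot parameter
 * (see the enable_debug() __setup handler later in this file), so
 * init_debug() output appears only when that parameter is given.
 */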

static bool __init is_enabled(struct lsm_info *lsm)
{
	if (!lsm->enabled)
		return false;

	return *lsm->enabled;
}

/* Mark an LSM's enabled flag. */
static int lsm_enabled_true __initdata = 1;
static int lsm_enabled_false __initdata = 0;
static void __init set_enabled(struct lsm_info *lsm, bool enabled)
{
	/*
	 * When an LSM hasn't configured an enable variable, we can use
	 * a hard-coded location for storing the default enabled state.
	 */
	if (!lsm->enabled) {
		if (enabled)
			lsm->enabled = &lsm_enabled_true;
		else
			lsm->enabled = &lsm_enabled_false;
	} else if (lsm->enabled == &lsm_enabled_true) {
		if (!enabled)
			lsm->enabled = &lsm_enabled_false;
	} else if (lsm->enabled == &lsm_enabled_false) {
		if (enabled)
			lsm->enabled = &lsm_enabled_true;
	} else {
		*lsm->enabled = enabled;
	}
}

/* Is an LSM already listed in the ordered LSMs list? */
static bool __init exists_ordered_lsm(struct lsm_info *lsm)
{
	struct lsm_info **check;

	for (check = ordered_lsms; *check; check++)
		if (*check == lsm)
			return true;

	return false;
}

/* Append an LSM to the list of ordered LSMs to initialize. */
static int last_lsm __initdata;
static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
{
	/* Ignore duplicate selections. */
	if (exists_ordered_lsm(lsm))
		return;

	if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from))
		return;

	/* Enable this LSM, if it is not already set. */
	if (!lsm->enabled)
		lsm->enabled = &lsm_enabled_true;
	ordered_lsms[last_lsm++] = lsm;

	init_debug("%s ordering: %s (%sabled)\n", from, lsm->name,
		   is_enabled(lsm) ? "en" : "dis");
}

/* Is an LSM allowed to be initialized? */
static bool __init lsm_allowed(struct lsm_info *lsm)
{
	/* Skip if the LSM is disabled. */
	if (!is_enabled(lsm))
		return false;

	/* Not allowed if another exclusive LSM already initialized. */
	if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
		init_debug("exclusive disabled: %s\n", lsm->name);
		return false;
	}

	return true;
}

static void __init lsm_set_blob_size(int *need, int *lbs)
{
	int offset;

	if (*need > 0) {
		offset = *lbs;
		*lbs += *need;
		*need = offset;
	}
}
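
/*
 * Example (hypothetical sizes): if the cumulative cred blob total is
 * currently 8 bytes and a module requests 4 more, that module's lbs_cred
 * is rewritten to the offset 8 and the running total becomes 12; each
 * module later finds its private data at cred->security plus that offset.
 */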

static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
{
	if (!needed)
		return;

	lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
	lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
	/*
	 * The inode blob gets an rcu_head in addition to
	 * what the modules might need.
	 */
	if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
		blob_sizes.lbs_inode = sizeof(struct rcu_head);
	lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
	lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
	lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
	lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
	lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
}

/* Prepare LSM for initialization. */
static void __init prepare_lsm(struct lsm_info *lsm)
{
	int enabled = lsm_allowed(lsm);

	/* Record enablement (to handle any following exclusive LSMs). */
	set_enabled(lsm, enabled);

	/* If enabled, do pre-initialization work. */
	if (enabled) {
		if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
			exclusive = lsm;
			init_debug("exclusive chosen: %s\n", lsm->name);
		}

		lsm_set_blob_sizes(lsm->blobs);
	}
}

/* Initialize a given LSM, if it is enabled. */
static void __init initialize_lsm(struct lsm_info *lsm)
{
	if (is_enabled(lsm)) {
		int ret;

		init_debug("initializing %s\n", lsm->name);
		ret = lsm->init();
		WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
	}
}

/* Populate ordered LSMs list from comma-separated LSM name list. */
static void __init ordered_lsm_parse(const char *order, const char *origin)
{
	struct lsm_info *lsm;
	char *sep, *name, *next;

	/* LSM_ORDER_FIRST is always first. */
	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
		if (lsm->order == LSM_ORDER_FIRST)
			append_ordered_lsm(lsm, "first");
	}

	/* Process "security=", if given. */
	if (chosen_major_lsm) {
		struct lsm_info *major;

		/*
		 * To match the original "security=" behavior, this
		 * explicitly does NOT fallback to another Legacy Major
		 * if the selected one was separately disabled: disable
		 * all non-matching Legacy Major LSMs.
		 */
		for (major = __start_lsm_info; major < __end_lsm_info;
		     major++) {
			if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
			    strcmp(major->name, chosen_major_lsm) != 0) {
				set_enabled(major, false);
				init_debug("security=%s disabled: %s\n",
					   chosen_major_lsm, major->name);
			}
		}
	}

	sep = kstrdup(order, GFP_KERNEL);
	next = sep;
	/* Walk the list, looking for matching LSMs. */
	while ((name = strsep(&next, ",")) != NULL) {
		bool found = false;

		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
			if (lsm->order == LSM_ORDER_MUTABLE &&
			    strcmp(lsm->name, name) == 0) {
				append_ordered_lsm(lsm, origin);
				found = true;
			}
		}

		if (!found)
			init_debug("%s ignored: %s\n", origin, name);
	}

	/* Process "security=", if given. */
	if (chosen_major_lsm) {
		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
			if (exists_ordered_lsm(lsm))
				continue;
			if (strcmp(lsm->name, chosen_major_lsm) == 0)
				append_ordered_lsm(lsm, "security=");
		}
	}

	/* Disable all LSMs not in the ordered list. */
	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
		if (exists_ordered_lsm(lsm))
			continue;
		set_enabled(lsm, false);
		init_debug("%s disabled: %s\n", origin, lsm->name);
	}

	kfree(sep);
}
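
/*
 * Illustrative example (values are hypothetical): with CONFIG_LSM set to
 * "lockdown,yama,integrity,selinux" and no "lsm=" or "security=" on the
 * command line, the ordered list becomes the LSM_ORDER_FIRST modules
 * (e.g. capability) followed by those four names in that order; every
 * built-in LSM not mentioned is disabled by the final loop above.
 */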

static void __init lsm_early_cred(struct cred *cred);
static void __init lsm_early_task(struct task_struct *task);

static int lsm_append(const char *new, char **result);

static void __init ordered_lsm_init(void)
{
	struct lsm_info **lsm;

	ordered_lsms = kcalloc(LSM_COUNT + 1, sizeof(*ordered_lsms),
				GFP_KERNEL);

	if (chosen_lsm_order) {
		if (chosen_major_lsm) {
			pr_info("security= is ignored because it is superseded by lsm=\n");
			chosen_major_lsm = NULL;
		}
		ordered_lsm_parse(chosen_lsm_order, "cmdline");
	} else
		ordered_lsm_parse(builtin_lsm_order, "builtin");

	for (lsm = ordered_lsms; *lsm; lsm++)
		prepare_lsm(*lsm);

	init_debug("cred blob size       = %d\n", blob_sizes.lbs_cred);
	init_debug("file blob size       = %d\n", blob_sizes.lbs_file);
	init_debug("inode blob size      = %d\n", blob_sizes.lbs_inode);
	init_debug("ipc blob size        = %d\n", blob_sizes.lbs_ipc);
	init_debug("msg_msg blob size    = %d\n", blob_sizes.lbs_msg_msg);
	init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
	init_debug("task blob size       = %d\n", blob_sizes.lbs_task);

	/*
	 * Create any kmem_caches needed for blobs
	 */
	if (blob_sizes.lbs_file)
		lsm_file_cache = kmem_cache_create("lsm_file_cache",
						   blob_sizes.lbs_file, 0,
						   SLAB_PANIC, NULL);
	if (blob_sizes.lbs_inode)
		lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
						    blob_sizes.lbs_inode, 0,
						    SLAB_PANIC, NULL);

	lsm_early_cred((struct cred *) current->cred);
	lsm_early_task(current);
	for (lsm = ordered_lsms; *lsm; lsm++)
		initialize_lsm(*lsm);

	kfree(ordered_lsms);
}

int __init early_security_init(void)
{
	struct lsm_info *lsm;

#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
	INIT_HLIST_HEAD(&security_hook_heads.NAME);
#include "linux/lsm_hook_defs.h"
#undef LSM_HOOK

	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
		if (!lsm->enabled)
			lsm->enabled = &lsm_enabled_true;
		prepare_lsm(lsm);
		initialize_lsm(lsm);
	}

	return 0;
}

/**
 * security_init - initializes the security framework
 *
 * This should be called early in the kernel initialization sequence.
 */
int __init security_init(void)
{
	struct lsm_info *lsm;

	pr_info("Security Framework initializing\n");

	/*
	 * Append the names of the early LSM modules now that kmalloc() is
	 * available
	 */
	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
		if (lsm->enabled)
			lsm_append(lsm->name, &lsm_names);
	}

	/* Load LSMs in specified order. */
	ordered_lsm_init();

	return 0;
}

/* Save user chosen LSM */
static int __init choose_major_lsm(char *str)
{
	chosen_major_lsm = str;
	return 1;
}
__setup("security=", choose_major_lsm);

/* Explicitly choose LSM initialization order. */
static int __init choose_lsm_order(char *str)
{
	chosen_lsm_order = str;
	return 1;
}
__setup("lsm=", choose_lsm_order);
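
/*
 * Example usage (hypothetical module names): booting with
 *
 *	lsm=lockdown,yama,apparmor
 *
 * initializes exactly those LSMs, in that order (plus any LSM_ORDER_FIRST
 * modules), and overrides both CONFIG_LSM and any "security=" option.
 */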

/* Enable LSM order debugging. */
static int __init enable_debug(char *str)
{
	debug = true;
	return 1;
}
__setup("lsm.debug", enable_debug);

static bool match_last_lsm(const char *list, const char *lsm)
{
	const char *last;

	if (WARN_ON(!list || !lsm))
		return false;
	last = strrchr(list, ',');
	if (last)
		/* Pass the comma, strcmp() will check for '\0' */
		last++;
	else
		last = list;
	return !strcmp(last, lsm);
}

static int lsm_append(const char *new, char **result)
{
	char *cp;

	if (*result == NULL) {
		*result = kstrdup(new, GFP_KERNEL);
		if (*result == NULL)
			return -ENOMEM;
	} else {
		/* Check if it is the last registered name */
		if (match_last_lsm(*result, new))
			return 0;
		cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
		if (cp == NULL)
			return -ENOMEM;
		kfree(*result);
		*result = cp;
	}
	return 0;
}
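
/*
 * For illustration only (hypothetical configuration): after the early and
 * ordered LSMs have registered, lsm_names might read something like
 * "capability,yama,apparmor"; this string is what is reported through
 * /sys/kernel/security/lsm.
 */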

/**
 * security_add_hooks - Add a module's hooks to the hook lists.
 * @hooks: the hooks to add
 * @count: the number of hooks to add
 * @lsm: the name of the security module
 *
 * Each LSM has to register its hooks with the infrastructure.
 */
void __init security_add_hooks(struct security_hook_list *hooks, int count,
				const char *lsm)
{
	int i;

	for (i = 0; i < count; i++) {
		hooks[i].lsm = lsm;
		hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
	}

	/*
	 * Don't try to append during early_security_init(), we'll come back
	 * and fix this up afterwards.
	 */
	if (slab_is_available()) {
		if (lsm_append(lsm, &lsm_names) < 0)
			panic("%s - Cannot get early memory.\n", __func__);
	}
}

int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
	return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
					    event, data);
}
EXPORT_SYMBOL(call_blocking_lsm_notifier);

int register_blocking_lsm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
						nb);
}
EXPORT_SYMBOL(register_blocking_lsm_notifier);

int unregister_blocking_lsm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
						  nb);
}
EXPORT_SYMBOL(unregister_blocking_lsm_notifier);

/**
 * lsm_cred_alloc - allocate a composite cred blob
 * @cred: the cred that needs a blob
 * @gfp: allocation type
 *
 * Allocate the cred blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
{
	if (blob_sizes.lbs_cred == 0) {
		cred->security = NULL;
		return 0;
	}

	cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
	if (cred->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_early_cred - during initialization allocate a composite cred blob
 * @cred: the cred that needs a blob
 *
 * Allocate the cred blob for all the modules
 */
static void __init lsm_early_cred(struct cred *cred)
{
	int rc = lsm_cred_alloc(cred, GFP_KERNEL);

	if (rc)
		panic("%s: Early cred alloc failed.\n", __func__);
}

/**
 * lsm_file_alloc - allocate a composite file blob
 * @file: the file that needs a blob
 *
 * Allocate the file blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_file_alloc(struct file *file)
{
	if (!lsm_file_cache) {
		file->f_security = NULL;
		return 0;
	}

	file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
	if (file->f_security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_inode_alloc - allocate a composite inode blob
 * @inode: the inode that needs a blob
 *
 * Allocate the inode blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
int lsm_inode_alloc(struct inode *inode)
{
	if (!lsm_inode_cache) {
		inode->i_security = NULL;
		return 0;
	}

	inode->i_security = kmem_cache_zalloc(lsm_inode_cache, GFP_NOFS);
	if (inode->i_security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_task_alloc - allocate a composite task blob
 * @task: the task that needs a blob
 *
 * Allocate the task blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_task_alloc(struct task_struct *task)
{
	if (blob_sizes.lbs_task == 0) {
		task->security = NULL;
		return 0;
	}

	task->security = kzalloc(blob_sizes.lbs_task, GFP_KERNEL);
	if (task->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_ipc_alloc - allocate a composite ipc blob
 * @kip: the ipc that needs a blob
 *
 * Allocate the ipc blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_ipc_alloc(struct kern_ipc_perm *kip)
{
	if (blob_sizes.lbs_ipc == 0) {
		kip->security = NULL;
		return 0;
	}

	kip->security = kzalloc(blob_sizes.lbs_ipc, GFP_KERNEL);
	if (kip->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_msg_msg_alloc - allocate a composite msg_msg blob
 * @mp: the msg_msg that needs a blob
 *
 * Allocate the msg_msg blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_msg_msg_alloc(struct msg_msg *mp)
{
	if (blob_sizes.lbs_msg_msg == 0) {
		mp->security = NULL;
		return 0;
	}

	mp->security = kzalloc(blob_sizes.lbs_msg_msg, GFP_KERNEL);
	if (mp->security == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * lsm_early_task - during initialization allocate a composite task blob
 * @task: the task that needs a blob
 *
 * Allocate the task blob for all the modules
 */
static void __init lsm_early_task(struct task_struct *task)
{
	int rc = lsm_task_alloc(task);

	if (rc)
		panic("%s: Early task alloc failed.\n", __func__);
}

/**
 * lsm_superblock_alloc - allocate a composite superblock blob
 * @sb: the superblock that needs a blob
 *
 * Allocate the superblock blob for all the modules
 *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
static int lsm_superblock_alloc(struct super_block *sb)
{
	if (blob_sizes.lbs_superblock == 0) {
		sb->s_security = NULL;
		return 0;
	}

	sb->s_security = kzalloc(blob_sizes.lbs_superblock, GFP_KERNEL);
	if (sb->s_security == NULL)
		return -ENOMEM;
	return 0;
}
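
/*
 * All of the lsm_*_alloc() helpers above follow the same pattern: when the
 * combined blob size (or cache) is zero the security pointer is simply set
 * to NULL; otherwise one zeroed blob large enough for every registered
 * module is allocated, and each module later carves out its slice at the
 * offset recorded in blob_sizes.
 */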

/*
 * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and
 * can be accessed with:
 *
 *	LSM_RET_DEFAULT(<hook_name>)
 *
 * The macros below define static constants for the default value of each
 * LSM hook.
 */
#define LSM_RET_DEFAULT(NAME) (NAME##_default)
#define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME)
#define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \
	static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT);
#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
	DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME)

#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
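
/*
 * Sketch of the expansion (the hook name is only an example): for an int
 * hook declared as LSM_HOOK(int, 0, inode_permission, ...) the block above
 * produces
 *
 *	static const int __maybe_unused inode_permission_default = (0);
 *
 * while void hooks expand to nothing, so LSM_RET_DEFAULT(inode_permission)
 * simply names that constant.
 */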

/*
 * Hook list operation macros.
 *
 * call_void_hook:
 *	This is a hook that does not return a value.
 *
 * call_int_hook:
 *	This is a hook that returns a value.
 */

#define call_void_hook(FUNC, ...)				\
	do {							\
		struct security_hook_list *P;			\
								\
		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) \
			P->hook.FUNC(__VA_ARGS__);		\
	} while (0)

#define call_int_hook(FUNC, IRC, ...) ({			\
	int RC = IRC;						\
	do {							\
		struct security_hook_list *P;			\
								\
		hlist_for_each_entry(P, &security_hook_heads.FUNC, list) { \
			RC = P->hook.FUNC(__VA_ARGS__);		\
			if (RC != 0)				\
				break;				\
		}						\
	} while (0);						\
	RC;							\
})
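
/*
 * Usage sketch (not a definition from this file): a one-line wrapper such
 * as
 *
 *	return call_int_hook(file_receive, 0, file);
 *
 * walks every registered file_receive hook in order, returns the first
 * non-zero result, and falls back to the default 0 when no module objects.
 */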

/* Security operations */

int security_binder_set_context_mgr(const struct cred *mgr)
{
	return call_int_hook(binder_set_context_mgr, 0, mgr);
}

int security_binder_transaction(const struct cred *from,
				const struct cred *to)
{
	return call_int_hook(binder_transaction, 0, from, to);
}

int security_binder_transfer_binder(const struct cred *from,
				    const struct cred *to)
{
	return call_int_hook(binder_transfer_binder, 0, from, to);
}

int security_binder_transfer_file(const struct cred *from,
				  const struct cred *to, struct file *file)
{
	return call_int_hook(binder_transfer_file, 0, from, to, file);
}

int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
	return call_int_hook(ptrace_access_check, 0, child, mode);
}

int security_ptrace_traceme(struct task_struct *parent)
{
	return call_int_hook(ptrace_traceme, 0, parent);
}

int security_capget(struct task_struct *target,
		     kernel_cap_t *effective,
		     kernel_cap_t *inheritable,
		     kernel_cap_t *permitted)
{
	return call_int_hook(capget, 0, target,
				effective, inheritable, permitted);
}

int security_capset(struct cred *new, const struct cred *old,
		    const kernel_cap_t *effective,
		    const kernel_cap_t *inheritable,
		    const kernel_cap_t *permitted)
{
	return call_int_hook(capset, 0, new, old,
				effective, inheritable, permitted);
}

int security_capable(const struct cred *cred,
		     struct user_namespace *ns,
		     int cap,
		     unsigned int opts)
{
	return call_int_hook(capable, 0, cred, ns, cap, opts);
}

int security_quotactl(int cmds, int type, int id, struct super_block *sb)
{
	return call_int_hook(quotactl, 0, cmds, type, id, sb);
}

int security_quota_on(struct dentry *dentry)
{
	return call_int_hook(quota_on, 0, dentry);
}

int security_syslog(int type)
{
	return call_int_hook(syslog, 0, type);
}

int security_settime64(const struct timespec64 *ts, const struct timezone *tz)
{
	return call_int_hook(settime, 0, ts, tz);
}

int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
	struct security_hook_list *hp;
	int cap_sys_admin = 1;
	int rc;

	/*
	 * The module will respond with a positive value if
	 * it thinks the __vm_enough_memory() call should be
	 * made with the cap_sys_admin set. If all of the modules
	 * agree that it should be set it will. If any module
	 * thinks it should not be set it won't.
	 */
	hlist_for_each_entry(hp, &security_hook_heads.vm_enough_memory, list) {
		rc = hp->hook.vm_enough_memory(mm, pages);
		if (rc <= 0) {
			cap_sys_admin = 0;
			break;
		}
	}
	return __vm_enough_memory(mm, pages, cap_sys_admin);
}

int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
	return call_int_hook(bprm_creds_for_exec, 0, bprm);
}

int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
{
	return call_int_hook(bprm_creds_from_file, 0, bprm, file);
}

int security_bprm_check(struct linux_binprm *bprm)
{
	int ret;

	ret = call_int_hook(bprm_check_security, 0, bprm);
	if (ret)
		return ret;
	return ima_bprm_check(bprm);
}

void security_bprm_committing_creds(struct linux_binprm *bprm)
{
	call_void_hook(bprm_committing_creds, bprm);
}

void security_bprm_committed_creds(struct linux_binprm *bprm)
{
	call_void_hook(bprm_committed_creds, bprm);
}

int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
{
	return call_int_hook(fs_context_dup, 0, fc, src_fc);
}

int security_fs_context_parse_param(struct fs_context *fc,
				    struct fs_parameter *param)
{
	struct security_hook_list *hp;
	int trc;
	int rc = -ENOPARAM;

	hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param,
			     list) {
		trc = hp->hook.fs_context_parse_param(fc, param);
		if (trc == 0)
			rc = 0;
		else if (trc != -ENOPARAM)
			return trc;
	}
	return rc;
}

int security_sb_alloc(struct super_block *sb)
{
	int rc = lsm_superblock_alloc(sb);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(sb_alloc_security, 0, sb);
	if (unlikely(rc))
		security_sb_free(sb);
	return rc;
}

void security_sb_delete(struct super_block *sb)
{
	call_void_hook(sb_delete, sb);
}

void security_sb_free(struct super_block *sb)
{
	call_void_hook(sb_free_security, sb);
	kfree(sb->s_security);
	sb->s_security = NULL;
}

930
void security_free_mnt_opts(void **mnt_opts)
931
{
932 933 934 935
	if (!*mnt_opts)
		return;
	call_void_hook(sb_free_mnt_opts, *mnt_opts);
	*mnt_opts = NULL;
936
}
937
EXPORT_SYMBOL(security_free_mnt_opts);
938

939
int security_sb_eat_lsm_opts(char *options, void **mnt_opts)
940
{
941
	return call_int_hook(sb_eat_lsm_opts, 0, options, mnt_opts);
942
}
A
Al Viro 已提交
943
EXPORT_SYMBOL(security_sb_eat_lsm_opts);
944

945 946 947 948 949 950 951
int security_sb_mnt_opts_compat(struct super_block *sb,
				void *mnt_opts)
{
	return call_int_hook(sb_mnt_opts_compat, 0, sb, mnt_opts);
}
EXPORT_SYMBOL(security_sb_mnt_opts_compat);

952
int security_sb_remount(struct super_block *sb,
953
			void *mnt_opts)
954
{
955
	return call_int_hook(sb_remount, 0, sb, mnt_opts);
956
}
A
Al Viro 已提交
957
EXPORT_SYMBOL(security_sb_remount);
958

959
int security_sb_kern_mount(struct super_block *sb)
960
{
961
	return call_int_hook(sb_kern_mount, 0, sb);
962 963
}

964 965
int security_sb_show_options(struct seq_file *m, struct super_block *sb)
{
966
	return call_int_hook(sb_show_options, 0, m, sb);
967 968
}

969 970
int security_sb_statfs(struct dentry *dentry)
{
971
	return call_int_hook(sb_statfs, 0, dentry);
972 973
}

A
Al Viro 已提交
974
int security_sb_mount(const char *dev_name, const struct path *path,
A
Al Viro 已提交
975
                       const char *type, unsigned long flags, void *data)
976
{
977
	return call_int_hook(sb_mount, 0, dev_name, path, type, flags, data);
978 979 980 981
}

int security_sb_umount(struct vfsmount *mnt, int flags)
{
982
	return call_int_hook(sb_umount, 0, mnt, flags);
983 984
}

A
Al Viro 已提交
985
int security_sb_pivotroot(const struct path *old_path, const struct path *new_path)
986
{
987
	return call_int_hook(sb_pivotroot, 0, old_path, new_path);
988 989
}

990
int security_sb_set_mnt_opts(struct super_block *sb,
991
				void *mnt_opts,
992 993
				unsigned long kern_flags,
				unsigned long *set_kern_flags)
994
{
C
Casey Schaufler 已提交
995
	return call_int_hook(sb_set_mnt_opts,
996 997
				mnt_opts ? -EOPNOTSUPP : 0, sb,
				mnt_opts, kern_flags, set_kern_flags);
998
}
999
EXPORT_SYMBOL(security_sb_set_mnt_opts);
1000

1001
int security_sb_clone_mnt_opts(const struct super_block *oldsb,
1002 1003 1004
				struct super_block *newsb,
				unsigned long kern_flags,
				unsigned long *set_kern_flags)
1005
{
1006 1007
	return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb,
				kern_flags, set_kern_flags);
1008
}
1009 1010
EXPORT_SYMBOL(security_sb_clone_mnt_opts);

1011 1012 1013 1014 1015
int security_move_mount(const struct path *from_path, const struct path *to_path)
{
	return call_int_hook(move_mount, 0, from_path, to_path);
}

1016 1017 1018 1019 1020 1021
int security_path_notify(const struct path *path, u64 mask,
				unsigned int obj_type)
{
	return call_int_hook(path_notify, 0, path, mask, obj_type);
}

1022 1023
int security_inode_alloc(struct inode *inode)
{
1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039
	int rc = lsm_inode_alloc(inode);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(inode_alloc_security, 0, inode);
	if (unlikely(rc))
		security_inode_free(inode);
	return rc;
}

static void inode_free_by_rcu(struct rcu_head *head)
{
	/*
	 * The rcu head is at the start of the inode blob
	 */
	kmem_cache_free(lsm_inode_cache, head);
1040 1041 1042 1043
}

void security_inode_free(struct inode *inode)
{
1044
	integrity_inode_free(inode);
1045
	call_void_hook(inode_free_security, inode);
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
	/*
	 * The inode may still be referenced in a path walk and
	 * a call to security_inode_permission() can be made
	 * after inode_free_security() is called. Ideally, the VFS
	 * wouldn't do this, but fixing that is a much harder
	 * job. For now, simply free the i_security via RCU, and
	 * leave the current inode->i_security pointer intact.
	 * The inode will be freed after the RCU grace period too.
	 */
	if (inode->i_security)
		call_rcu((struct rcu_head *)inode->i_security,
				inode_free_by_rcu);
1058 1059
}

1060
int security_dentry_init_security(struct dentry *dentry, int mode,
1061 1062 1063
				  const struct qstr *name,
				  const char **xattr_name, void **ctx,
				  u32 *ctxlen)
1064
{
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077
	struct security_hook_list *hp;
	int rc;

	/*
	 * Only one module will provide a security context.
	 */
	hlist_for_each_entry(hp, &security_hook_heads.dentry_init_security, list) {
		rc = hp->hook.dentry_init_security(dentry, mode, name,
						   xattr_name, ctx, ctxlen);
		if (rc != LSM_RET_DEFAULT(dentry_init_security))
			return rc;
	}
	return LSM_RET_DEFAULT(dentry_init_security);
1078 1079 1080
}
EXPORT_SYMBOL(security_dentry_init_security);

1081 1082 1083 1084 1085 1086 1087 1088 1089
int security_dentry_create_files_as(struct dentry *dentry, int mode,
				    struct qstr *name,
				    const struct cred *old, struct cred *new)
{
	return call_int_hook(dentry_create_files_as, 0, dentry, mode,
				name, old, new);
}
EXPORT_SYMBOL(security_dentry_create_files_as);

1090
int security_inode_init_security(struct inode *inode, struct inode *dir,
1091 1092
				 const struct qstr *qstr,
				 const initxattrs initxattrs, void *fs_data)
1093
{
1094 1095
	struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
	struct xattr *lsm_xattr, *evm_xattr, *xattr;
1096 1097
	int ret;

1098
	if (unlikely(IS_PRIVATE(inode)))
1099
		return 0;
1100 1101

	if (!initxattrs)
1102 1103
		return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
				     dir, qstr, NULL, NULL, NULL);
1104
	memset(new_xattrs, 0, sizeof(new_xattrs));
1105
	lsm_xattr = new_xattrs;
C
Casey Schaufler 已提交
1106
	ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
1107 1108 1109 1110 1111
						&lsm_xattr->name,
						&lsm_xattr->value,
						&lsm_xattr->value_len);
	if (ret)
		goto out;
1112 1113 1114 1115 1116

	evm_xattr = lsm_xattr + 1;
	ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
	if (ret)
		goto out;
1117 1118
	ret = initxattrs(inode, new_xattrs, fs_data);
out:
1119
	for (xattr = new_xattrs; xattr->value != NULL; xattr++)
1120
		kfree(xattr->value);
1121 1122 1123 1124
	return (ret == -EOPNOTSUPP) ? 0 : ret;
}
EXPORT_SYMBOL(security_inode_init_security);

1125 1126 1127 1128 1129 1130 1131 1132
int security_inode_init_security_anon(struct inode *inode,
				      const struct qstr *name,
				      const struct inode *context_inode)
{
	return call_int_hook(inode_init_security_anon, 0, inode, name,
			     context_inode);
}

1133
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
1134
				     const struct qstr *qstr, const char **name,
1135
				     void **value, size_t *len)
1136 1137
{
	if (unlikely(IS_PRIVATE(inode)))
1138
		return -EOPNOTSUPP;
1139 1140
	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
			     qstr, name, value, len);
1141
}
1142
EXPORT_SYMBOL(security_old_inode_init_security);
1143

1144
#ifdef CONFIG_SECURITY_PATH
1145
int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
1146 1147
			unsigned int dev)
{
1148
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1149
		return 0;
1150
	return call_int_hook(path_mknod, 0, dir, dentry, mode, dev);
1151 1152 1153
}
EXPORT_SYMBOL(security_path_mknod);

1154
int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode)
1155
{
1156
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1157
		return 0;
1158
	return call_int_hook(path_mkdir, 0, dir, dentry, mode);
1159
}
1160
EXPORT_SYMBOL(security_path_mkdir);
1161

A
Al Viro 已提交
1162
int security_path_rmdir(const struct path *dir, struct dentry *dentry)
1163
{
1164
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1165
		return 0;
1166
	return call_int_hook(path_rmdir, 0, dir, dentry);
1167 1168
}

A
Al Viro 已提交
1169
int security_path_unlink(const struct path *dir, struct dentry *dentry)
1170
{
1171
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1172
		return 0;
1173
	return call_int_hook(path_unlink, 0, dir, dentry);
1174
}
1175
EXPORT_SYMBOL(security_path_unlink);
1176

1177
int security_path_symlink(const struct path *dir, struct dentry *dentry,
1178 1179
			  const char *old_name)
{
1180
	if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry))))
1181
		return 0;
1182
	return call_int_hook(path_symlink, 0, dir, dentry, old_name);
1183 1184
}

A
Al Viro 已提交
1185
int security_path_link(struct dentry *old_dentry, const struct path *new_dir,
1186 1187
		       struct dentry *new_dentry)
{
1188
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1189
		return 0;
1190
	return call_int_hook(path_link, 0, old_dentry, new_dir, new_dentry);
1191 1192
}

A
Al Viro 已提交
1193 1194
int security_path_rename(const struct path *old_dir, struct dentry *old_dentry,
			 const struct path *new_dir, struct dentry *new_dentry,
1195
			 unsigned int flags)
1196
{
1197 1198
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
		     (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1199
		return 0;
M
Miklos Szeredi 已提交
1200

1201
	return call_int_hook(path_rename, 0, old_dir, old_dentry, new_dir,
1202
				new_dentry, flags);
1203
}
1204
EXPORT_SYMBOL(security_path_rename);
1205

A
Al Viro 已提交
1206
int security_path_truncate(const struct path *path)
1207
{
1208
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1209
		return 0;
1210
	return call_int_hook(path_truncate, 0, path);
1211
}
1212

1213
int security_path_chmod(const struct path *path, umode_t mode)
1214
{
1215
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1216
		return 0;
1217
	return call_int_hook(path_chmod, 0, path, mode);
1218 1219
}

1220
int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid)
1221
{
1222
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1223
		return 0;
1224
	return call_int_hook(path_chown, 0, path, uid, gid);
1225
}
T
Tetsuo Handa 已提交
1226

A
Al Viro 已提交
1227
int security_path_chroot(const struct path *path)
T
Tetsuo Handa 已提交
1228
{
1229
	return call_int_hook(path_chroot, 0, path);
T
Tetsuo Handa 已提交
1230
}
1231 1232
#endif

A
Al Viro 已提交
1233
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
1234 1235 1236
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1237
	return call_int_hook(inode_create, 0, dir, dentry, mode);
1238
}
1239
EXPORT_SYMBOL_GPL(security_inode_create);
1240 1241 1242 1243

int security_inode_link(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry)
{
1244
	if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry))))
1245
		return 0;
1246
	return call_int_hook(inode_link, 0, old_dentry, dir, new_dentry);
1247 1248 1249 1250
}

int security_inode_unlink(struct inode *dir, struct dentry *dentry)
{
1251
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1252
		return 0;
1253
	return call_int_hook(inode_unlink, 0, dir, dentry);
1254 1255 1256 1257 1258 1259 1260
}

int security_inode_symlink(struct inode *dir, struct dentry *dentry,
			    const char *old_name)
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1261
	return call_int_hook(inode_symlink, 0, dir, dentry, old_name);
1262 1263
}

1264
int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1265 1266 1267
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1268
	return call_int_hook(inode_mkdir, 0, dir, dentry, mode);
1269
}
1270
EXPORT_SYMBOL_GPL(security_inode_mkdir);
1271 1272 1273

int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
1274
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1275
		return 0;
1276
	return call_int_hook(inode_rmdir, 0, dir, dentry);
1277 1278
}

A
Al Viro 已提交
1279
int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
1280 1281 1282
{
	if (unlikely(IS_PRIVATE(dir)))
		return 0;
1283
	return call_int_hook(inode_mknod, 0, dir, dentry, mode, dev);
1284 1285 1286
}

int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
1287 1288
			   struct inode *new_dir, struct dentry *new_dentry,
			   unsigned int flags)
1289
{
1290 1291
        if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) ||
            (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry)))))
1292
		return 0;
M
Miklos Szeredi 已提交
1293 1294

	if (flags & RENAME_EXCHANGE) {
1295
		int err = call_int_hook(inode_rename, 0, new_dir, new_dentry,
M
Miklos Szeredi 已提交
1296 1297 1298 1299 1300
						     old_dir, old_dentry);
		if (err)
			return err;
	}

1301
	return call_int_hook(inode_rename, 0, old_dir, old_dentry,
1302 1303 1304 1305 1306
					   new_dir, new_dentry);
}

int security_inode_readlink(struct dentry *dentry)
{
1307
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1308
		return 0;
1309
	return call_int_hook(inode_readlink, 0, dentry);
1310 1311
}

1312 1313
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
			       bool rcu)
1314
{
1315
	if (unlikely(IS_PRIVATE(inode)))
1316
		return 0;
1317
	return call_int_hook(inode_follow_link, 0, dentry, inode, rcu);
1318 1319
}

1320
int security_inode_permission(struct inode *inode, int mask)
1321 1322 1323
{
	if (unlikely(IS_PRIVATE(inode)))
		return 0;
1324
	return call_int_hook(inode_permission, 0, inode, mask);
1325 1326 1327 1328
}

int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
1329 1330
	int ret;

1331
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1332
		return 0;
1333
	ret = call_int_hook(inode_setattr, 0, dentry, attr);
1334 1335 1336
	if (ret)
		return ret;
	return evm_inode_setattr(dentry, attr);
1337
}
1338
EXPORT_SYMBOL_GPL(security_inode_setattr);
1339

1340
int security_inode_getattr(const struct path *path)
1341
{
1342
	if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry))))
1343
		return 0;
1344
	return call_int_hook(inode_getattr, 0, path);
1345 1346
}

1347 1348
int security_inode_setxattr(struct user_namespace *mnt_userns,
			    struct dentry *dentry, const char *name,
1349
			    const void *value, size_t size, int flags)
1350
{
1351 1352
	int ret;

1353
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1354
		return 0;
C
Casey Schaufler 已提交
1355 1356 1357 1358
	/*
	 * SELinux and Smack integrate the cap call,
	 * so assume that all LSMs supplying this call do so.
	 */
1359 1360
	ret = call_int_hook(inode_setxattr, 1, mnt_userns, dentry, name, value,
			    size, flags);
C
Casey Schaufler 已提交
1361 1362 1363

	if (ret == 1)
		ret = cap_inode_setxattr(dentry, name, value, size, flags);
1364 1365 1366
	if (ret)
		return ret;
	ret = ima_inode_setxattr(dentry, name, value, size);
1367 1368
	if (ret)
		return ret;
1369
	return evm_inode_setxattr(mnt_userns, dentry, name, value, size);
1370 1371
}

1372 1373
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
				  const void *value, size_t size, int flags)
1374
{
1375
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1376
		return;
1377
	call_void_hook(inode_post_setxattr, dentry, name, value, size, flags);
1378
	evm_inode_post_setxattr(dentry, name, value, size);
1379 1380
}

1381
int security_inode_getxattr(struct dentry *dentry, const char *name)
1382
{
1383
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1384
		return 0;
1385
	return call_int_hook(inode_getxattr, 0, dentry, name);
1386 1387 1388 1389
}

int security_inode_listxattr(struct dentry *dentry)
{
1390
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1391
		return 0;
1392
	return call_int_hook(inode_listxattr, 0, dentry);
1393 1394
}

1395 1396
int security_inode_removexattr(struct user_namespace *mnt_userns,
			       struct dentry *dentry, const char *name)
1397
{
1398 1399
	int ret;

1400
	if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
1401
		return 0;
C
Casey Schaufler 已提交
1402 1403 1404 1405
	/*
	 * SELinux and Smack integrate the cap call,
	 * so assume that all LSMs supplying this call do so.
	 */
1406
	ret = call_int_hook(inode_removexattr, 1, mnt_userns, dentry, name);
C
Casey Schaufler 已提交
1407
	if (ret == 1)
1408
		ret = cap_inode_removexattr(mnt_userns, dentry, name);
1409 1410 1411
	if (ret)
		return ret;
	ret = ima_inode_removexattr(dentry, name);
1412 1413
	if (ret)
		return ret;
1414
	return evm_inode_removexattr(mnt_userns, dentry, name);
1415 1416
}

1417 1418
int security_inode_need_killpriv(struct dentry *dentry)
{
1419
	return call_int_hook(inode_need_killpriv, 0, dentry);
1420 1421
}

1422 1423
int security_inode_killpriv(struct user_namespace *mnt_userns,
			    struct dentry *dentry)
1424
{
1425
	return call_int_hook(inode_killpriv, 0, mnt_userns, dentry);
1426 1427
}

1428 1429 1430
int security_inode_getsecurity(struct user_namespace *mnt_userns,
			       struct inode *inode, const char *name,
			       void **buffer, bool alloc)
1431
{
1432 1433 1434
	struct security_hook_list *hp;
	int rc;

1435
	if (unlikely(IS_PRIVATE(inode)))
1436
		return LSM_RET_DEFAULT(inode_getsecurity);
1437 1438 1439
	/*
	 * Only one module will provide an attribute with a given name.
	 */
1440
	hlist_for_each_entry(hp, &security_hook_heads.inode_getsecurity, list) {
1441
		rc = hp->hook.inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
1442
		if (rc != LSM_RET_DEFAULT(inode_getsecurity))
1443 1444
			return rc;
	}
1445
	return LSM_RET_DEFAULT(inode_getsecurity);
1446 1447 1448 1449
}

int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
1450 1451 1452
	struct security_hook_list *hp;
	int rc;

1453
	if (unlikely(IS_PRIVATE(inode)))
1454
		return LSM_RET_DEFAULT(inode_setsecurity);
1455 1456 1457
	/*
	 * Only one module will provide an attribute with a given name.
	 */
1458
	hlist_for_each_entry(hp, &security_hook_heads.inode_setsecurity, list) {
1459 1460
		rc = hp->hook.inode_setsecurity(inode, name, value, size,
								flags);
1461
		if (rc != LSM_RET_DEFAULT(inode_setsecurity))
1462 1463
			return rc;
	}
1464
	return LSM_RET_DEFAULT(inode_setsecurity);
1465 1466 1467 1468 1469 1470
}

int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
	if (unlikely(IS_PRIVATE(inode)))
		return 0;
1471
	return call_int_hook(inode_listsecurity, 0, inode, buffer, buffer_size);
1472
}
1473
EXPORT_SYMBOL(security_inode_listsecurity);
1474

1475
void security_inode_getsecid(struct inode *inode, u32 *secid)
1476
{
1477
	call_void_hook(inode_getsecid, inode, secid);
1478 1479
}

1480 1481 1482 1483 1484 1485
int security_inode_copy_up(struct dentry *src, struct cred **new)
{
	return call_int_hook(inode_copy_up, 0, src, new);
}
EXPORT_SYMBOL(security_inode_copy_up);

1486 1487
int security_inode_copy_up_xattr(const char *name)
{
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503
	struct security_hook_list *hp;
	int rc;

	/*
	 * The implementation can return 0 (accept the xattr), 1 (discard the
	 * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
	 * any other error code incase of an error.
	 */
	hlist_for_each_entry(hp,
		&security_hook_heads.inode_copy_up_xattr, list) {
		rc = hp->hook.inode_copy_up_xattr(name);
		if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
			return rc;
	}

	return LSM_RET_DEFAULT(inode_copy_up_xattr);
1504 1505 1506
}
EXPORT_SYMBOL(security_inode_copy_up_xattr);

1507 1508 1509 1510 1511 1512
int security_kernfs_init_security(struct kernfs_node *kn_dir,
				  struct kernfs_node *kn)
{
	return call_int_hook(kernfs_init_security, 0, kn_dir, kn);
}

1513 1514
int security_file_permission(struct file *file, int mask)
{
1515 1516
	int ret;

1517
	ret = call_int_hook(file_permission, 0, file, mask);
1518 1519 1520 1521
	if (ret)
		return ret;

	return fsnotify_perm(file, mask);
1522 1523 1524 1525
}

int security_file_alloc(struct file *file)
{
1526 1527 1528 1529 1530 1531 1532 1533
	int rc = lsm_file_alloc(file);

	if (rc)
		return rc;
	rc = call_int_hook(file_alloc_security, 0, file);
	if (unlikely(rc))
		security_file_free(file);
	return rc;
1534 1535 1536 1537
}

void security_file_free(struct file *file)
{
1538 1539
	void *blob;

1540
	call_void_hook(file_free_security, file);
1541 1542 1543 1544 1545 1546

	blob = file->f_security;
	if (blob) {
		file->f_security = NULL;
		kmem_cache_free(lsm_file_cache, blob);
	}
1547 1548 1549 1550
}

int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
1551
	return call_int_hook(file_ioctl, 0, file, cmd, arg);
1552
}
1553
EXPORT_SYMBOL_GPL(security_file_ioctl);
1554

1555
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
1556
{
1557
	/*
1558 1559
	 * Do we have PROT_READ and does the application expect
	 * it to imply PROT_EXEC?  If not, nothing to talk about...
1560
	 */
1561 1562
	if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
		return prot;
1563
	if (!(current->personality & READ_IMPLIES_EXEC))
1564 1565 1566 1567 1568 1569 1570 1571
		return prot;
	/*
	 * if that's an anonymous mapping, let it.
	 */
	if (!file)
		return prot | PROT_EXEC;
	/*
	 * ditto if it's not on noexec mount, except that on !MMU we need
1572
	 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
1573
	 */
1574
	if (!path_noexec(&file->f_path)) {
1575
#ifndef CONFIG_MMU
1576 1577 1578 1579 1580
		if (file->f_op->mmap_capabilities) {
			unsigned caps = file->f_op->mmap_capabilities(file);
			if (!(caps & NOMMU_MAP_EXEC))
				return prot;
		}
1581
#endif
1582
		return prot | PROT_EXEC;
1583
	}
1584 1585 1586 1587 1588 1589 1590 1591
	/* anything on noexec mount won't get PROT_EXEC */
	return prot;
}

int security_mmap_file(struct file *file, unsigned long prot,
			unsigned long flags)
{
	int ret;
1592
	ret = call_int_hook(mmap_file, 0, file, prot,
1593
					mmap_prot(file, prot), flags);
1594 1595 1596
	if (ret)
		return ret;
	return ima_file_mmap(file, prot);
1597 1598
}

1599 1600
int security_mmap_addr(unsigned long addr)
{
1601
	return call_int_hook(mmap_addr, 0, addr);
1602 1603
}

1604 1605 1606
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
			    unsigned long prot)
{
1607 1608 1609 1610 1611 1612
	int ret;

	ret = call_int_hook(file_mprotect, 0, vma, reqprot, prot);
	if (ret)
		return ret;
	return ima_file_mprotect(vma, prot);
1613 1614 1615 1616
}

int security_file_lock(struct file *file, unsigned int cmd)
{
1617
	return call_int_hook(file_lock, 0, file, cmd);
1618 1619 1620 1621
}

int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
1622
	return call_int_hook(file_fcntl, 0, file, cmd, arg);
1623 1624
}

1625
void security_file_set_fowner(struct file *file)
1626
{
1627
	call_void_hook(file_set_fowner, file);
1628 1629 1630 1631 1632
}

int security_file_send_sigiotask(struct task_struct *tsk,
				  struct fown_struct *fown, int sig)
{
1633
	return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
1634 1635 1636 1637
}

int security_file_receive(struct file *file)
{
1638
	return call_int_hook(file_receive, 0, file);
1639 1640
}

1641
int security_file_open(struct file *file)
1642
{
1643 1644
	int ret;

A
Al Viro 已提交
1645
	ret = call_int_hook(file_open, 0, file);
1646 1647 1648 1649
	if (ret)
		return ret;

	return fsnotify_perm(file, MAY_OPEN);
1650 1651
}

1652 1653
int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
1654 1655 1656 1657 1658 1659 1660 1661
	int rc = lsm_task_alloc(task);

	if (rc)
		return rc;
	rc = call_int_hook(task_alloc, 0, task, clone_flags);
	if (unlikely(rc))
		security_task_free(task);
	return rc;
1662 1663
}

1664 1665
void security_task_free(struct task_struct *task)
{
1666
	call_void_hook(task_free, task);
1667 1668 1669

	kfree(task->security);
	task->security = NULL;
1670 1671
}

1672 1673
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
1674 1675 1676 1677 1678 1679
	int rc = lsm_cred_alloc(cred, gfp);

	if (rc)
		return rc;

	rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
1680
	if (unlikely(rc))
1681 1682
		security_cred_free(cred);
	return rc;
1683 1684
}

D
David Howells 已提交
1685
void security_cred_free(struct cred *cred)
1686
{
1687 1688 1689 1690 1691 1692 1693
	/*
	 * There is a failure case in prepare_creds() that
	 * may result in a call here with ->security being NULL.
	 */
	if (unlikely(cred->security == NULL))
		return;

1694
	call_void_hook(cred_free, cred);
1695 1696 1697

	kfree(cred->security);
	cred->security = NULL;
1698 1699
}

D
David Howells 已提交
1700
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
1701
{
1702 1703 1704 1705 1706 1707
	int rc = lsm_cred_alloc(new, gfp);

	if (rc)
		return rc;

	rc = call_int_hook(cred_prepare, 0, new, old, gfp);
1708
	if (unlikely(rc))
1709 1710
		security_cred_free(new);
	return rc;
D
David Howells 已提交
1711 1712
}

1713 1714
void security_transfer_creds(struct cred *new, const struct cred *old)
{
1715
	call_void_hook(cred_transfer, new, old);
1716 1717
}

1718 1719 1720 1721 1722 1723 1724
void security_cred_getsecid(const struct cred *c, u32 *secid)
{
	*secid = 0;
	call_void_hook(cred_getsecid, c, secid);
}
EXPORT_SYMBOL(security_cred_getsecid);

1725 1726
int security_kernel_act_as(struct cred *new, u32 secid)
{
1727
	return call_int_hook(kernel_act_as, 0, new, secid);
1728 1729 1730 1731
}

int security_kernel_create_files_as(struct cred *new, struct inode *inode)
{
1732
	return call_int_hook(kernel_create_files_as, 0, new, inode);
1733 1734
}

1735
int security_kernel_module_request(char *kmod_name)
1736
{
1737 1738 1739 1740 1741 1742
	int ret;

	ret = call_int_hook(kernel_module_request, 0, kmod_name);
	if (ret)
		return ret;
	return integrity_kernel_module_request(kmod_name);
1743 1744
}

1745 1746
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id,
			      bool contents)
1747 1748 1749
{
	int ret;

1750
	ret = call_int_hook(kernel_read_file, 0, file, id, contents);
1751 1752
	if (ret)
		return ret;
1753
	return ima_read_file(file, id, contents);
1754 1755 1756
}
EXPORT_SYMBOL_GPL(security_kernel_read_file);

1757 1758
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
				   enum kernel_read_file_id id)
1759
{
1760 1761 1762 1763 1764 1765
	int ret;

	ret = call_int_hook(kernel_post_read_file, 0, file, buf, size, id);
	if (ret)
		return ret;
	return ima_post_read_file(file, buf, size, id);
1766 1767 1768
}
EXPORT_SYMBOL_GPL(security_kernel_post_read_file);

1769
int security_kernel_load_data(enum kernel_load_data_id id, bool contents)
1770
{
1771 1772
	int ret;

1773
	ret = call_int_hook(kernel_load_data, 0, id, contents);
1774 1775
	if (ret)
		return ret;
1776
	return ima_load_data(id, contents);
1777
}
1778
EXPORT_SYMBOL_GPL(security_kernel_load_data);
1779

1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793
int security_kernel_post_load_data(char *buf, loff_t size,
				   enum kernel_load_data_id id,
				   char *description)
{
	int ret;

	ret = call_int_hook(kernel_post_load_data, 0, buf, size, id,
			    description);
	if (ret)
		return ret;
	return ima_post_load_data(buf, size, id, description);
}
EXPORT_SYMBOL_GPL(security_kernel_post_load_data);

D
David Howells 已提交
1794 1795
int security_task_fix_setuid(struct cred *new, const struct cred *old,
			     int flags)
1796
{
1797
	return call_int_hook(task_fix_setuid, 0, new, old, flags);
1798 1799
}

1800 1801 1802 1803 1804 1805
int security_task_fix_setgid(struct cred *new, const struct cred *old,
				 int flags)
{
	return call_int_hook(task_fix_setgid, 0, new, old, flags);
}

1806 1807
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
1808
	return call_int_hook(task_setpgid, 0, p, pgid);
1809 1810 1811 1812
}

int security_task_getpgid(struct task_struct *p)
{
1813
	return call_int_hook(task_getpgid, 0, p);
1814 1815 1816 1817
}

int security_task_getsid(struct task_struct *p)
{
1818
	return call_int_hook(task_getsid, 0, p);
1819 1820
}

1821
void security_current_getsecid_subj(u32 *secid)
1822
{
C
Casey Schaufler 已提交
1823
	*secid = 0;
1824
	call_void_hook(current_getsecid_subj, secid);
1825
}
1826
EXPORT_SYMBOL(security_current_getsecid_subj);
1827 1828 1829 1830 1831 1832 1833

void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
{
	*secid = 0;
	call_void_hook(task_getsecid_obj, p, secid);
}
EXPORT_SYMBOL(security_task_getsecid_obj);
1834 1835 1836

int security_task_setnice(struct task_struct *p, int nice)
{
1837
	return call_int_hook(task_setnice, 0, p, nice);
1838 1839 1840 1841
}

int security_task_setioprio(struct task_struct *p, int ioprio)
{
1842
	return call_int_hook(task_setioprio, 0, p, ioprio);
1843 1844 1845 1846
}

int security_task_getioprio(struct task_struct *p)
{
1847
	return call_int_hook(task_getioprio, 0, p);
1848 1849
}

1850 1851 1852 1853 1854 1855
int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
			  unsigned int flags)
{
	return call_int_hook(task_prlimit, 0, cred, tcred, flags);
}

int security_task_setrlimit(struct task_struct *p, unsigned int resource,
		struct rlimit *new_rlim)
{
	return call_int_hook(task_setrlimit, 0, p, resource, new_rlim);
}

int security_task_setscheduler(struct task_struct *p)
{
	return call_int_hook(task_setscheduler, 0, p);
}

int security_task_getscheduler(struct task_struct *p)
{
	return call_int_hook(task_getscheduler, 0, p);
}

int security_task_movememory(struct task_struct *p)
{
	return call_int_hook(task_movememory, 0, p);
}

int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
			int sig, const struct cred *cred)
{
	return call_int_hook(task_kill, 0, p, info, sig, cred);
}

int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
			 unsigned long arg4, unsigned long arg5)
{
	int thisrc;
	int rc = LSM_RET_DEFAULT(task_prctl);
	struct security_hook_list *hp;

	hlist_for_each_entry(hp, &security_hook_heads.task_prctl, list) {
		thisrc = hp->hook.task_prctl(option, arg2, arg3, arg4, arg5);
		if (thisrc != LSM_RET_DEFAULT(task_prctl)) {
			rc = thisrc;
			if (thisrc != 0)
				break;
		}
	}
	return rc;
}
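
/*
 * Illustrative sketch (not part of the interface above): task_prctl walks
 * security_hook_heads by hand instead of using call_int_hook() so that a
 * module returning the hook's default (-ENOSYS) does not mask another
 * module's verdict.  A hypothetical module "foo" would opt out of options
 * it does not handle like this:
 *
 *	static int foo_task_prctl(int option, unsigned long arg2,
 *				  unsigned long arg3, unsigned long arg4,
 *				  unsigned long arg5)
 *	{
 *		if (option != PR_SET_DUMPABLE)
 *			return -ENOSYS;		// decline, let other LSMs decide
 *		return 0;			// allow
 *	}
 */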

void security_task_to_inode(struct task_struct *p, struct inode *inode)
{
	call_void_hook(task_to_inode, p, inode);
}

int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
{
	return call_int_hook(ipc_permission, 0, ipcp, flag);
}

void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
	*secid = 0;
	call_void_hook(ipc_getsecid, ipcp, secid);
}

int security_msg_msg_alloc(struct msg_msg *msg)
{
	int rc = lsm_msg_msg_alloc(msg);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(msg_msg_alloc_security, 0, msg);
	if (unlikely(rc))
		security_msg_msg_free(msg);
	return rc;
}

void security_msg_msg_free(struct msg_msg *msg)
{
	call_void_hook(msg_msg_free_security, msg);
	kfree(msg->security);
	msg->security = NULL;
}
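
/*
 * Note on the allocation pattern used for the IPC objects above and below
 * (msg_msg, msg_queue, shm, sem): lsm_msg_msg_alloc()/lsm_ipc_alloc()
 * allocate the shared security blob first, each LSM then initialises its
 * slice from its *_alloc_security hook, and the blob is torn down again if
 * any hook fails.  A minimal sketch of how a hypothetical module "foo"
 * would reserve space in that blob (names and sizes are illustrative only):
 *
 *	static struct lsm_blob_sizes foo_blob_sizes __lsm_ro_after_init = {
 *		.lbs_msg_msg = sizeof(u32),
 *		.lbs_ipc = sizeof(u32),
 *	};
 */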

int security_msg_queue_alloc(struct kern_ipc_perm *msq)
{
	int rc = lsm_ipc_alloc(msq);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(msg_queue_alloc_security, 0, msq);
	if (unlikely(rc))
		security_msg_queue_free(msq);
	return rc;
}

void security_msg_queue_free(struct kern_ipc_perm *msq)
{
	call_void_hook(msg_queue_free_security, msq);
	kfree(msq->security);
	msq->security = NULL;
}

int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg)
{
	return call_int_hook(msg_queue_associate, 0, msq, msqflg);
}

int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd)
{
	return call_int_hook(msg_queue_msgctl, 0, msq, cmd);
}

int security_msg_queue_msgsnd(struct kern_ipc_perm *msq,
			       struct msg_msg *msg, int msqflg)
{
	return call_int_hook(msg_queue_msgsnd, 0, msq, msg, msqflg);
}

int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg,
			       struct task_struct *target, long type, int mode)
{
	return call_int_hook(msg_queue_msgrcv, 0, msq, msg, target, type, mode);
}

int security_shm_alloc(struct kern_ipc_perm *shp)
{
	int rc = lsm_ipc_alloc(shp);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(shm_alloc_security, 0, shp);
	if (unlikely(rc))
		security_shm_free(shp);
	return rc;
}

void security_shm_free(struct kern_ipc_perm *shp)
{
	call_void_hook(shm_free_security, shp);
	kfree(shp->security);
	shp->security = NULL;
}

int security_shm_associate(struct kern_ipc_perm *shp, int shmflg)
{
	return call_int_hook(shm_associate, 0, shp, shmflg);
}

int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd)
{
	return call_int_hook(shm_shmctl, 0, shp, cmd);
}

int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg)
{
	return call_int_hook(shm_shmat, 0, shp, shmaddr, shmflg);
}

int security_sem_alloc(struct kern_ipc_perm *sma)
{
	int rc = lsm_ipc_alloc(sma);

	if (unlikely(rc))
		return rc;
	rc = call_int_hook(sem_alloc_security, 0, sma);
	if (unlikely(rc))
		security_sem_free(sma);
	return rc;
}

void security_sem_free(struct kern_ipc_perm *sma)
{
	call_void_hook(sem_free_security, sma);
	kfree(sma->security);
	sma->security = NULL;
}

int security_sem_associate(struct kern_ipc_perm *sma, int semflg)
{
	return call_int_hook(sem_associate, 0, sma, semflg);
}

int security_sem_semctl(struct kern_ipc_perm *sma, int cmd)
{
	return call_int_hook(sem_semctl, 0, sma, cmd);
}

int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
			unsigned nsops, int alter)
{
	return call_int_hook(sem_semop, 0, sma, sops, nsops, alter);
}

void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{
	if (unlikely(inode && IS_PRIVATE(inode)))
		return;
	call_void_hook(d_instantiate, dentry, inode);
}
EXPORT_SYMBOL(security_d_instantiate);

int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
				char **value)
{
	struct security_hook_list *hp;

	hlist_for_each_entry(hp, &security_hook_heads.getprocattr, list) {
		if (lsm != NULL && strcmp(lsm, hp->lsm))
			continue;
		return hp->hook.getprocattr(p, name, value);
	}
	return LSM_RET_DEFAULT(getprocattr);
}

int security_setprocattr(const char *lsm, const char *name, void *value,
			 size_t size)
{
	struct security_hook_list *hp;

	hlist_for_each_entry(hp, &security_hook_heads.setprocattr, list) {
		if (lsm != NULL && strcmp(lsm, hp->lsm))
			continue;
		return hp->hook.setprocattr(name, value, size);
	}
	return LSM_RET_DEFAULT(setprocattr);
}
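
/*
 * Sketch of how the lsm argument above is used (drawn from the hooks'
 * callers in procfs, noted here only for context): proc_pid_attr_write()
 * and friends pass the LSM name taken from the path, so a write to
 * /proc/self/attr/<name> reaches the first registered module, while
 * /proc/self/attr/<lsm>/<name> selects one module explicitly, e.g.
 *
 *	echo "..." > /proc/self/attr/apparmor/current
 *
 * only reaches the hook whose hp->lsm string is "apparmor".
 */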

int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
	return call_int_hook(netlink_send, 0, sk, skb);
}

int security_ismaclabel(const char *name)
{
	return call_int_hook(ismaclabel, 0, name);
}
EXPORT_SYMBOL(security_ismaclabel);

int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	struct security_hook_list *hp;
	int rc;

	/*
	 * Currently, only one LSM can implement secid_to_secctx (i.e this
	 * LSM hook is not "stackable").
	 */
	hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) {
		rc = hp->hook.secid_to_secctx(secid, secdata, seclen);
		if (rc != LSM_RET_DEFAULT(secid_to_secctx))
			return rc;
	}

	return LSM_RET_DEFAULT(secid_to_secctx);
}
EXPORT_SYMBOL(security_secid_to_secctx);
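
/*
 * Usage sketch for the secid/secctx pair (callers such as audit and
 * networking follow this shape; the error handling here is illustrative):
 *
 *	char *ctx;
 *	u32 len;
 *
 *	if (security_secid_to_secctx(secid, &ctx, &len) == 0) {
 *		// ctx/len are owned by the LSM; copy or log, then release
 *		audit_log_untrustedstring(ab, ctx);
 *		security_release_secctx(ctx, len);
 *	}
 */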

int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	*secid = 0;
	return call_int_hook(secctx_to_secid, 0, secdata, seclen, secid);
}
EXPORT_SYMBOL(security_secctx_to_secid);

void security_release_secctx(char *secdata, u32 seclen)
{
	call_void_hook(release_secctx, secdata, seclen);
}
EXPORT_SYMBOL(security_release_secctx);

void security_inode_invalidate_secctx(struct inode *inode)
{
	call_void_hook(inode_invalidate_secctx, inode);
}
EXPORT_SYMBOL(security_inode_invalidate_secctx);

int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
	return call_int_hook(inode_notifysecctx, 0, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_notifysecctx);

int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
	return call_int_hook(inode_setsecctx, 0, dentry, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_setsecctx);

int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
	return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_getsecctx);

#ifdef CONFIG_WATCH_QUEUE
int security_post_notification(const struct cred *w_cred,
			       const struct cred *cred,
			       struct watch_notification *n)
{
	return call_int_hook(post_notification, 0, w_cred, cred, n);
}
#endif /* CONFIG_WATCH_QUEUE */

#ifdef CONFIG_KEY_NOTIFICATIONS
int security_watch_key(struct key *key)
{
	return call_int_hook(watch_key, 0, key);
}
#endif

#ifdef CONFIG_SECURITY_NETWORK

int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
{
	return call_int_hook(unix_stream_connect, 0, sock, other, newsk);
}
EXPORT_SYMBOL(security_unix_stream_connect);

int security_unix_may_send(struct socket *sock,  struct socket *other)
{
	return call_int_hook(unix_may_send, 0, sock, other);
}
EXPORT_SYMBOL(security_unix_may_send);

int security_socket_create(int family, int type, int protocol, int kern)
{
	return call_int_hook(socket_create, 0, family, type, protocol, kern);
}

int security_socket_post_create(struct socket *sock, int family,
				int type, int protocol, int kern)
{
	return call_int_hook(socket_post_create, 0, sock, family, type,
						protocol, kern);
}

int security_socket_socketpair(struct socket *socka, struct socket *sockb)
{
	return call_int_hook(socket_socketpair, 0, socka, sockb);
}
EXPORT_SYMBOL(security_socket_socketpair);

int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
{
	return call_int_hook(socket_bind, 0, sock, address, addrlen);
}

int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
{
	return call_int_hook(socket_connect, 0, sock, address, addrlen);
}

int security_socket_listen(struct socket *sock, int backlog)
{
	return call_int_hook(socket_listen, 0, sock, backlog);
}

int security_socket_accept(struct socket *sock, struct socket *newsock)
{
	return call_int_hook(socket_accept, 0, sock, newsock);
}

int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
{
	return call_int_hook(socket_sendmsg, 0, sock, msg, size);
}

int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
			    int size, int flags)
{
	return call_int_hook(socket_recvmsg, 0, sock, msg, size, flags);
}

int security_socket_getsockname(struct socket *sock)
{
	return call_int_hook(socket_getsockname, 0, sock);
}

int security_socket_getpeername(struct socket *sock)
{
	return call_int_hook(socket_getpeername, 0, sock);
}

int security_socket_getsockopt(struct socket *sock, int level, int optname)
{
	return call_int_hook(socket_getsockopt, 0, sock, level, optname);
}

int security_socket_setsockopt(struct socket *sock, int level, int optname)
{
	return call_int_hook(socket_setsockopt, 0, sock, level, optname);
}

int security_socket_shutdown(struct socket *sock, int how)
{
	return call_int_hook(socket_shutdown, 0, sock, how);
}

int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
}
EXPORT_SYMBOL(security_sock_rcv_skb);

int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
				      int __user *optlen, unsigned len)
{
	return call_int_hook(socket_getpeersec_stream, -ENOPROTOOPT, sock,
				optval, optlen, len);
}

int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
			     skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);

int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
	return call_int_hook(sk_alloc_security, 0, sk, family, priority);
}

void security_sk_free(struct sock *sk)
{
	call_void_hook(sk_free_security, sk);
}

void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
	call_void_hook(sk_clone_security, sk, newsk);
}
EXPORT_SYMBOL(security_sk_clone);

void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic)
{
	call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);

void security_req_classify_flow(const struct request_sock *req,
				struct flowi_common *flic)
{
	call_void_hook(req_classify_flow, req, flic);
}
EXPORT_SYMBOL(security_req_classify_flow);

void security_sock_graft(struct sock *sk, struct socket *parent)
{
	call_void_hook(sock_graft, sk, parent);
}
EXPORT_SYMBOL(security_sock_graft);

int security_inet_conn_request(const struct sock *sk,
			struct sk_buff *skb, struct request_sock *req)
{
	return call_int_hook(inet_conn_request, 0, sk, skb, req);
}
EXPORT_SYMBOL(security_inet_conn_request);

void security_inet_csk_clone(struct sock *newsk,
			const struct request_sock *req)
{
	call_void_hook(inet_csk_clone, newsk, req);
}

void security_inet_conn_established(struct sock *sk,
			struct sk_buff *skb)
{
	call_void_hook(inet_conn_established, sk, skb);
}
EXPORT_SYMBOL(security_inet_conn_established);

int security_secmark_relabel_packet(u32 secid)
{
	return call_int_hook(secmark_relabel_packet, 0, secid);
}
EXPORT_SYMBOL(security_secmark_relabel_packet);

void security_secmark_refcount_inc(void)
{
	call_void_hook(secmark_refcount_inc);
}
EXPORT_SYMBOL(security_secmark_refcount_inc);

void security_secmark_refcount_dec(void)
{
	call_void_hook(secmark_refcount_dec);
}
EXPORT_SYMBOL(security_secmark_refcount_dec);

int security_tun_dev_alloc_security(void **security)
{
	return call_int_hook(tun_dev_alloc_security, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_alloc_security);

void security_tun_dev_free_security(void *security)
{
	call_void_hook(tun_dev_free_security, security);
}
EXPORT_SYMBOL(security_tun_dev_free_security);

int security_tun_dev_create(void)
{
	return call_int_hook(tun_dev_create, 0);
}
EXPORT_SYMBOL(security_tun_dev_create);

int security_tun_dev_attach_queue(void *security)
{
	return call_int_hook(tun_dev_attach_queue, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_attach_queue);

int security_tun_dev_attach(struct sock *sk, void *security)
{
	return call_int_hook(tun_dev_attach, 0, sk, security);
}
EXPORT_SYMBOL(security_tun_dev_attach);

int security_tun_dev_open(void *security)
{
	return call_int_hook(tun_dev_open, 0, security);
}
EXPORT_SYMBOL(security_tun_dev_open);

int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb)
{
	return call_int_hook(sctp_assoc_request, 0, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_request);

int security_sctp_bind_connect(struct sock *sk, int optname,
			       struct sockaddr *address, int addrlen)
{
	return call_int_hook(sctp_bind_connect, 0, sk, optname,
			     address, addrlen);
}
EXPORT_SYMBOL(security_sctp_bind_connect);

void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
			    struct sock *newsk)
{
	call_void_hook(sctp_sk_clone, asoc, sk, newsk);
}
EXPORT_SYMBOL(security_sctp_sk_clone);

int security_sctp_assoc_established(struct sctp_association *asoc,
				    struct sk_buff *skb)
{
	return call_int_hook(sctp_assoc_established, 0, asoc, skb);
}
EXPORT_SYMBOL(security_sctp_assoc_established);

#endif	/* CONFIG_SECURITY_NETWORK */

#ifdef CONFIG_SECURITY_INFINIBAND

int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey)
{
	return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey);
}
EXPORT_SYMBOL(security_ib_pkey_access);

int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num)
{
	return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num);
}
EXPORT_SYMBOL(security_ib_endport_manage_subnet);

int security_ib_alloc_security(void **sec)
{
	return call_int_hook(ib_alloc_security, 0, sec);
}
EXPORT_SYMBOL(security_ib_alloc_security);

void security_ib_free_security(void *sec)
{
	call_void_hook(ib_free_security, sec);
}
EXPORT_SYMBOL(security_ib_free_security);
#endif	/* CONFIG_SECURITY_INFINIBAND */

#ifdef CONFIG_SECURITY_NETWORK_XFRM

int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
			       struct xfrm_user_sec_ctx *sec_ctx,
			       gfp_t gfp)
{
	return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);

int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
			      struct xfrm_sec_ctx **new_ctxp)
{
	return call_int_hook(xfrm_policy_clone_security, 0, old_ctx, new_ctxp);
}

void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
	call_void_hook(xfrm_policy_free_security, ctx);
}
EXPORT_SYMBOL(security_xfrm_policy_free);

int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
	return call_int_hook(xfrm_policy_delete_security, 0, ctx);
}

int security_xfrm_state_alloc(struct xfrm_state *x,
			      struct xfrm_user_sec_ctx *sec_ctx)
{
	return call_int_hook(xfrm_state_alloc, 0, x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);

int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
				      struct xfrm_sec_ctx *polsec, u32 secid)
{
	return call_int_hook(xfrm_state_alloc_acquire, 0, x, polsec, secid);
}

int security_xfrm_state_delete(struct xfrm_state *x)
{
	return call_int_hook(xfrm_state_delete_security, 0, x);
}
EXPORT_SYMBOL(security_xfrm_state_delete);

void security_xfrm_state_free(struct xfrm_state *x)
{
	call_void_hook(xfrm_state_free_security, x);
}

int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
	return call_int_hook(xfrm_policy_lookup, 0, ctx, fl_secid);
}

int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
				       struct xfrm_policy *xp,
				       const struct flowi_common *flic)
{
	struct security_hook_list *hp;
	int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);

	/*
	 * Since this function is expected to return 0 or 1, the judgment
	 * becomes difficult if multiple LSMs supply this call. Fortunately,
	 * we can use the first LSM's judgment because currently only SELinux
	 * supplies this call.
	 *
	 * For speed optimization, we explicitly break the loop rather than
	 * using the macro
	 */
	hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
				list) {
		rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
		break;
	}
	return rc;
}

int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
	return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
}

void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
{
	int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid,
				0);

	BUG_ON(rc);
}
EXPORT_SYMBOL(security_skb_classify_flow);

#endif	/* CONFIG_SECURITY_NETWORK_XFRM */

#ifdef CONFIG_KEYS

int security_key_alloc(struct key *key, const struct cred *cred,
		       unsigned long flags)
{
	return call_int_hook(key_alloc, 0, key, cred, flags);
}

void security_key_free(struct key *key)
{
	call_void_hook(key_free, key);
}

int security_key_permission(key_ref_t key_ref, const struct cred *cred,
			    enum key_need_perm need_perm)
{
	return call_int_hook(key_permission, 0, key_ref, cred, need_perm);
}

int security_key_getsecurity(struct key *key, char **_buffer)
{
	*_buffer = NULL;
	return call_int_hook(key_getsecurity, 0, key, _buffer);
}

#endif	/* CONFIG_KEYS */

#ifdef CONFIG_AUDIT

int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
{
	return call_int_hook(audit_rule_init, 0, field, op, rulestr, lsmrule);
}

int security_audit_rule_known(struct audit_krule *krule)
{
	return call_int_hook(audit_rule_known, 0, krule);
}

void security_audit_rule_free(void *lsmrule)
{
	call_void_hook(audit_rule_free, lsmrule);
}

int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule)
{
	return call_int_hook(audit_rule_match, 0, secid, field, op, lsmrule);
}
#endif /* CONFIG_AUDIT */

#ifdef CONFIG_BPF_SYSCALL
int security_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return call_int_hook(bpf, 0, cmd, attr, size);
}
int security_bpf_map(struct bpf_map *map, fmode_t fmode)
{
	return call_int_hook(bpf_map, 0, map, fmode);
}
int security_bpf_prog(struct bpf_prog *prog)
{
	return call_int_hook(bpf_prog, 0, prog);
}
int security_bpf_map_alloc(struct bpf_map *map)
{
	return call_int_hook(bpf_map_alloc_security, 0, map);
}
int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
{
	return call_int_hook(bpf_prog_alloc_security, 0, aux);
}
void security_bpf_map_free(struct bpf_map *map)
{
	call_void_hook(bpf_map_free_security, map);
}
void security_bpf_prog_free(struct bpf_prog_aux *aux)
{
	call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */

int security_locked_down(enum lockdown_reason what)
{
	return call_int_hook(locked_down, 0, what);
}
EXPORT_SYMBOL(security_locked_down);
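
/*
 * Usage sketch (mirrors existing callers, shown here only for context):
 * a subsystem gates a confidentiality- or integrity-sensitive operation
 * on the lockdown state before proceeding, e.g.
 *
 *	ret = security_locked_down(LOCKDOWN_DEV_MEM);
 *	if (ret)
 *		return ret;
 */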

#ifdef CONFIG_PERF_EVENTS
int security_perf_event_open(struct perf_event_attr *attr, int type)
{
	return call_int_hook(perf_event_open, 0, attr, type);
}

int security_perf_event_alloc(struct perf_event *event)
{
	return call_int_hook(perf_event_alloc, 0, event);
}

void security_perf_event_free(struct perf_event *event)
{
	call_void_hook(perf_event_free, event);
}

int security_perf_event_read(struct perf_event *event)
{
	return call_int_hook(perf_event_read, 0, event);
}

int security_perf_event_write(struct perf_event *event)
{
	return call_int_hook(perf_event_write, 0, event);
}
#endif /* CONFIG_PERF_EVENTS */

#ifdef CONFIG_IO_URING
int security_uring_override_creds(const struct cred *new)
{
	return call_int_hook(uring_override_creds, 0, new);
}

int security_uring_sqpoll(void)
{
	return call_int_hook(uring_sqpoll, 0);
}
#endif /* CONFIG_IO_URING */