Commit 832d30ca authored by Linus Torvalds

Merge branch 'for-linus' of...

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6: (38 commits)
  SELinux: Make selinux_kernel_create_files_as() shouldn't just always return 0
  TOMOYO: Protect find_task_by_vpid() with RCU.
  Security: add static to security_ops and default_security_ops variable
  selinux: libsepol: remove dead code in check_avtab_hierarchy_callback()
  TOMOYO: Remove __func__ from tomoyo_is_correct_path/domain
  security: fix a couple of sparse warnings
  TOMOYO: Remove unneeded parameter.
  TOMOYO: Use shorter names.
  TOMOYO: Use enum for index numbers.
  TOMOYO: Add garbage collector.
  TOMOYO: Add refcounter on domain structure.
  TOMOYO: Merge headers.
  TOMOYO: Add refcounter on string data.
  TOMOYO: Reduce lines by using common path for addition and deletion.
  selinux: fix memory leak in sel_make_bools
  TOMOYO: Extract bitfield
  syslog: clean up needless comment
  syslog: use defined constants instead of raw numbers
  syslog: distinguish between /proc/kmsg and syscalls
  selinux: allow MLS->non-MLS and vice versa upon policy reload
  ...
......@@ -12,37 +12,37 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/syslog.h>
#include <asm/uaccess.h>
#include <asm/io.h>
extern wait_queue_head_t log_wait;
extern int do_syslog(int type, char __user *bug, int count);
static int kmsg_open(struct inode * inode, struct file * file)
{
return do_syslog(1,NULL,0);
return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
}
static int kmsg_release(struct inode * inode, struct file * file)
{
(void) do_syslog(0,NULL,0);
(void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
return 0;
}
static ssize_t kmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
if ((file->f_flags & O_NONBLOCK) &&
!do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return -EAGAIN;
return do_syslog(2, buf, count);
return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
}
static unsigned int kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
if (do_syslog(9, NULL, 0))
if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
return POLLIN | POLLRDNORM;
return 0;
}
......
......@@ -76,7 +76,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
extern int cap_syslog(int type);
extern int cap_syslog(int type, bool from_file);
extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
struct msghdr;
......@@ -95,6 +95,8 @@ struct seq_file;
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
void reset_security_ops(void);
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
......@@ -985,6 +987,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check permissions on incoming network packets. This hook is distinct
* from Netfilter's IP input hooks since it is the first time that the
* incoming sk_buff @skb has been associated with a particular socket, @sk.
* Must not sleep inside this hook because some callers hold spinlocks.
* @sk contains the sock (not socket) associated with the incoming sk_buff.
* @skb contains the incoming network data.
* @socket_getpeersec_stream:
......@@ -1348,6 +1351,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* logging to the console.
* See the syslog(2) manual page for an explanation of the @type values.
* @type contains the type of action.
* @from_file indicates the context of action (if it came from /proc).
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
......@@ -1462,7 +1466,7 @@ struct security_operations {
int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
int (*quota_on) (struct dentry *dentry);
int (*syslog) (int type);
int (*syslog) (int type, bool from_file);
int (*settime) (struct timespec *ts, struct timezone *tz);
int (*vm_enough_memory) (struct mm_struct *mm, long pages);
......@@ -1761,7 +1765,7 @@ int security_acct(struct file *file);
int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_syslog(int type, bool from_file);
int security_settime(struct timespec *ts, struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
......@@ -2007,9 +2011,9 @@ static inline int security_quota_on(struct dentry *dentry)
return 0;
}
static inline int security_syslog(int type)
static inline int security_syslog(int type, bool from_file)
{
return cap_syslog(type);
return cap_syslog(type, from_file);
}
static inline int security_settime(struct timespec *ts, struct timezone *tz)
......
/* Syslog internals
*
* Copyright 2010 Canonical, Ltd.
* Author: Kees Cook <kees.cook@canonical.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_SYSLOG_H
#define _LINUX_SYSLOG_H
/* Close the log. Currently a NOP. */
#define SYSLOG_ACTION_CLOSE 0
/* Open the log. Currently a NOP. */
#define SYSLOG_ACTION_OPEN 1
/* Read from the log. */
#define SYSLOG_ACTION_READ 2
/* Read all messages remaining in the ring buffer. */
#define SYSLOG_ACTION_READ_ALL 3
/* Read and clear all messages remaining in the ring buffer */
#define SYSLOG_ACTION_READ_CLEAR 4
/* Clear ring buffer. */
#define SYSLOG_ACTION_CLEAR 5
/* Disable printk's to console */
#define SYSLOG_ACTION_CONSOLE_OFF 6
/* Enable printk's to console */
#define SYSLOG_ACTION_CONSOLE_ON 7
/* Set level of messages printed to console */
#define SYSLOG_ACTION_CONSOLE_LEVEL 8
/* Return number of unread characters in the log buffer */
#define SYSLOG_ACTION_SIZE_UNREAD 9
/* Return size of the log buffer */
#define SYSLOG_ACTION_SIZE_BUFFER 10
#define SYSLOG_FROM_CALL 0
#define SYSLOG_FROM_FILE 1
int do_syslog(int type, char __user *buf, int count, bool from_file);
#endif /* _LINUX_SYSLOG_H */
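The action values above mirror the numeric commands accepted by the syslog(2) system call, which glibc exposes to userspace as klogctl(3). A minimal userspace sketch (assumptions: a Linux host with glibc; the constants are duplicated locally for illustration, and SYSLOG_ACTION_SIZE_UNREAD still needs CAP_SYS_ADMIN on the syscall path per cap_syslog() further down):

#include <stdio.h>
#include <sys/klog.h>   /* glibc wrapper for the syslog(2) system call */

/* Values duplicated from linux/syslog.h for illustration only. */
#define SYSLOG_ACTION_SIZE_UNREAD 9
#define SYSLOG_ACTION_SIZE_BUFFER 10

int main(void)
{
	/* How many characters are waiting to be read from the ring buffer? */
	int unread = klogctl(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0);
	/* Total size of the kernel log buffer. */
	int bufsz = klogctl(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);

	if (unread < 0 || bufsz < 0) {
		perror("klogctl");
		return 1;
	}
	printf("unread: %d of %d bytes\n", unread, bufsz);
	return 0;
}

The same query issued through /proc/kmsg is only gated at open() time, which is exactly the distinction the new from_file parameter lets the LSM hooks make.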
......@@ -135,7 +135,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
if (pid && (pid != task_pid_vnr(current))) {
struct task_struct *target;
read_lock(&tasklist_lock);
rcu_read_lock();
target = find_task_by_vpid(pid);
if (!target)
......@@ -143,7 +143,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
else
ret = security_capget(target, pEp, pIp, pPp);
read_unlock(&tasklist_lock);
rcu_read_unlock();
} else
ret = security_capget(current, pEp, pIp, pPp);
......
......@@ -35,6 +35,7 @@
#include <linux/kexec.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <asm/uaccess.h>
......@@ -258,38 +259,23 @@ static inline void boot_delay_msec(void)
}
#endif
/*
* Commands to do_syslog:
*
* 0 -- Close the log. Currently a NOP.
* 1 -- Open the log. Currently a NOP.
* 2 -- Read from the log.
* 3 -- Read all messages remaining in the ring buffer.
* 4 -- Read and clear all messages remaining in the ring buffer
* 5 -- Clear ring buffer.
* 6 -- Disable printk's to console
* 7 -- Enable printk's to console
* 8 -- Set level of messages printed to console
* 9 -- Return number of unread characters in the log buffer
* 10 -- Return size of the log buffer
*/
int do_syslog(int type, char __user *buf, int len)
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error = 0;
error = security_syslog(type);
error = security_syslog(type, from_file);
if (error)
return error;
switch (type) {
case 0: /* Close log */
case SYSLOG_ACTION_CLOSE: /* Close log */
break;
case 1: /* Open log */
case SYSLOG_ACTION_OPEN: /* Open log */
break;
case 2: /* Read from log */
case SYSLOG_ACTION_READ: /* Read from log */
error = -EINVAL;
if (!buf || len < 0)
goto out;
......@@ -320,10 +306,12 @@ int do_syslog(int type, char __user *buf, int len)
if (!error)
error = i;
break;
case 4: /* Read/clear last kernel messages */
/* Read/clear last kernel messages */
case SYSLOG_ACTION_READ_CLEAR:
do_clear = 1;
/* FALL THRU */
case 3: /* Read last kernel messages */
/* Read last kernel messages */
case SYSLOG_ACTION_READ_ALL:
error = -EINVAL;
if (!buf || len < 0)
goto out;
......@@ -376,21 +364,25 @@ int do_syslog(int type, char __user *buf, int len)
}
}
break;
case 5: /* Clear ring buffer */
/* Clear ring buffer */
case SYSLOG_ACTION_CLEAR:
logged_chars = 0;
break;
case 6: /* Disable logging to console */
/* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_OFF:
if (saved_console_loglevel == -1)
saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
case 7: /* Enable logging to console */
/* Enable logging to console */
case SYSLOG_ACTION_CONSOLE_ON:
if (saved_console_loglevel != -1) {
console_loglevel = saved_console_loglevel;
saved_console_loglevel = -1;
}
break;
case 8: /* Set level of messages printed to console */
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
error = -EINVAL;
if (len < 1 || len > 8)
goto out;
......@@ -401,10 +393,12 @@ int do_syslog(int type, char __user *buf, int len)
saved_console_loglevel = -1;
error = 0;
break;
case 9: /* Number of chars in the log buffer */
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
error = log_end - log_start;
break;
case 10: /* Size of the log buffer */
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
error = log_buf_len;
break;
default:
......@@ -417,7 +411,7 @@ out:
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
return do_syslog(type, buf, len);
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
/*
......
......@@ -906,10 +906,6 @@ static void cap_audit_rule_free(void *lsmrule)
}
#endif /* CONFIG_AUDIT */
struct security_operations default_security_ops = {
.name = "default",
};
#define set_to_cap_if_null(ops, function) \
do { \
if (!ops->function) { \
......
......@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
#include <linux/syslog.h>
/*
* If a non-root user executes a setuid-root binary in
......@@ -888,13 +889,17 @@ error:
/**
* cap_syslog - Determine whether syslog function is permitted
* @type: Function requested
* @from_file: Whether this request came from an open file (i.e. /proc)
*
* Determine whether the current process is permitted to use a particular
* syslog function, returning 0 if permission is granted, -ve if not.
*/
int cap_syslog(int type)
int cap_syslog(int type, bool from_file)
{
if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
if (type != SYSLOG_ACTION_OPEN && from_file)
return 0;
if ((type != SYSLOG_ACTION_READ_ALL &&
type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
......
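In words: a request arriving through /proc/kmsg (from_file) was already permission-checked when the file was opened, so only SYSLOG_ACTION_OPEN is re-checked; on the syscall path everything except the two read-only queries requires CAP_SYS_ADMIN. A standalone sketch of that decision (plain C with a hypothetical capability flag, not the kernel implementation):

#include <stdbool.h>
#include <stdio.h>

#define SYSLOG_ACTION_OPEN        1
#define SYSLOG_ACTION_READ_ALL    3
#define SYSLOG_ACTION_SIZE_BUFFER 10

/* Hypothetical stand-in for the kernel's capable(CAP_SYS_ADMIN). */
static bool has_cap_sys_admin;

static int sketch_cap_syslog(int type, bool from_file)
{
	/* /proc/kmsg was already permission-checked at open() time. */
	if (type != SYSLOG_ACTION_OPEN && from_file)
		return 0;
	/* Reading the whole buffer or its size stays unprivileged. */
	if (type != SYSLOG_ACTION_READ_ALL &&
	    type != SYSLOG_ACTION_SIZE_BUFFER && !has_cap_sys_admin)
		return -1;	/* -EPERM in the kernel */
	return 0;
}

int main(void)
{
	printf("READ_ALL via syscall, unprivileged: %d\n",
	       sketch_cap_syslog(SYSLOG_ACTION_READ_ALL, false));	/* 0 */
	printf("SIZE_UNREAD (9) via /proc/kmsg:     %d\n",
	       sketch_cap_syslog(9, true));				/* 0 */
	printf("CLEAR (5) via syscall, unprivileged: %d\n",
	       sketch_cap_syslog(5, false));				/* -1 */
	return 0;
}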
......@@ -23,10 +23,12 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
/* things that live in capability.c */
extern struct security_operations default_security_ops;
extern void security_fixup_ops(struct security_operations *ops);
struct security_operations *security_ops; /* Initialized to NULL */
static struct security_operations *security_ops;
static struct security_operations default_security_ops = {
.name = "default",
};
static inline int verify(struct security_operations *ops)
{
......@@ -63,6 +65,11 @@ int __init security_init(void)
return 0;
}
void reset_security_ops(void)
{
security_ops = &default_security_ops;
}
/* Save user chosen LSM */
static int __init choose_lsm(char *str)
{
......@@ -203,9 +210,9 @@ int security_quota_on(struct dentry *dentry)
return security_ops->quota_on(dentry);
}
int security_syslog(int type)
int security_syslog(int type, bool from_file)
{
return security_ops->syslog(type);
return security_ops->syslog(type, from_file);
}
int security_settime(struct timespec *ts, struct timezone *tz)
......@@ -389,42 +396,42 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
EXPORT_SYMBOL(security_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
int security_path_mknod(struct path *path, struct dentry *dentry, int mode,
int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
unsigned int dev)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_mknod(path, dentry, mode, dev);
return security_ops->path_mknod(dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
int security_path_mkdir(struct path *path, struct dentry *dentry, int mode)
int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_mkdir(path, dentry, mode);
return security_ops->path_mkdir(dir, dentry, mode);
}
int security_path_rmdir(struct path *path, struct dentry *dentry)
int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_rmdir(path, dentry);
return security_ops->path_rmdir(dir, dentry);
}
int security_path_unlink(struct path *path, struct dentry *dentry)
int security_path_unlink(struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_unlink(path, dentry);
return security_ops->path_unlink(dir, dentry);
}
int security_path_symlink(struct path *path, struct dentry *dentry,
int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_symlink(path, dentry, old_name);
return security_ops->path_symlink(dir, dentry, old_name);
}
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
......@@ -630,14 +637,14 @@ int security_inode_killpriv(struct dentry *dentry)
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return -EOPNOTSUPP;
return security_ops->inode_getsecurity(inode, name, buffer, alloc);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return -EOPNOTSUPP;
return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
......
......@@ -489,17 +489,14 @@ void avc_audit(u32 ssid, u32 tsid,
struct common_audit_data stack_data;
u32 denied, audited;
denied = requested & ~avd->allowed;
if (denied) {
audited = denied;
if (!(audited & avd->auditdeny))
return;
} else if (result) {
if (denied)
audited = denied & avd->auditdeny;
else if (result)
audited = denied = requested;
} else {
audited = requested;
if (!(audited & avd->auditallow))
return;
}
else
audited = requested & avd->auditallow;
if (!audited)
return;
if (!a) {
a = &stack_data;
memset(a, 0, sizeof(*a));
......@@ -746,9 +743,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
else
avd = &avd_entry;
rc = security_compute_av(ssid, tsid, tclass, requested, avd);
if (rc)
goto out;
security_compute_av(ssid, tsid, tclass, avd);
rcu_read_lock();
node = avc_insert(ssid, tsid, tclass, avd);
} else {
......@@ -770,7 +765,6 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
}
rcu_read_unlock();
out:
return rc;
}
......
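The avc_audit() rework above collapses the three audit branches into a single mask computation with one early return. A small standalone sketch of just that mask logic (illustrative bit values, not SELinux's real access-vector layout):

#include <stdint.h>
#include <stdio.h>

/* Mirror the simplified branch structure: denied -> auditdeny mask,
 * error with nothing denied -> audit everything requested,
 * granted -> auditallow mask. */
static uint32_t audited_mask(uint32_t requested, uint32_t allowed,
			     uint32_t auditallow, uint32_t auditdeny,
			     int result)
{
	uint32_t denied = requested & ~allowed;
	uint32_t audited;

	if (denied)
		audited = denied & auditdeny;
	else if (result)
		audited = requested;
	else
		audited = requested & auditallow;
	return audited;		/* caller skips the audit record if 0 */
}

int main(void)
{
	/* Denied bit 0x4 that the policy marks dontaudit (cleared in auditdeny). */
	printf("%#x\n", audited_mask(0x7, 0x3, 0x0, ~0x4u, 0));	/* 0 -> no record */
	/* Granted request with an auditallow rule on bit 0x1. */
	printf("%#x\n", audited_mask(0x1, 0x1, 0x1, ~0u, 0));		/* 0x1 -> audit */
	return 0;
}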
......@@ -76,6 +76,7 @@
#include <linux/selinux.h>
#include <linux/mutex.h>
#include <linux/posix-timers.h>
#include <linux/syslog.h>
#include "avc.h"
#include "objsec.h"
......@@ -125,13 +126,6 @@ __setup("selinux=", selinux_enabled_setup);
int selinux_enabled = 1;
#endif
/*
* Minimal support for a secondary security module,
* just to allow the use of the capability module.
*/
static struct security_operations *secondary_ops;
/* Lists of inode and superblock security structures initialized
before the policy was loaded. */
static LIST_HEAD(superblock_security_head);
......@@ -2049,29 +2043,30 @@ static int selinux_quota_on(struct dentry *dentry)
return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
}
static int selinux_syslog(int type)
static int selinux_syslog(int type, bool from_file)
{
int rc;
rc = cap_syslog(type);
rc = cap_syslog(type, from_file);
if (rc)
return rc;
switch (type) {
case 3: /* Read last kernel messages */
case 10: /* Return size of the log buffer */
case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
rc = task_has_system(current, SYSTEM__SYSLOG_READ);
break;
case 6: /* Disable logging to console */
case 7: /* Enable logging to console */
case 8: /* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
break;
case 0: /* Close log */
case 1: /* Open log */
case 2: /* Read from log */
case 4: /* Read/clear last kernel messages */
case 5: /* Clear ring buffer */
case SYSLOG_ACTION_CLOSE: /* Close log */
case SYSLOG_ACTION_OPEN: /* Open log */
case SYSLOG_ACTION_READ: /* Read from log */
case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
default:
rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
break;
......@@ -3334,7 +3329,7 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
if (ret == 0)
tsec->create_sid = isec->sid;
return 0;
return ret;
}
static int selinux_kernel_module_request(char *kmod_name)
......@@ -5672,9 +5667,6 @@ static __init int selinux_init(void)
0, SLAB_PANIC, NULL);
avc_init();
secondary_ops = security_ops;
if (!secondary_ops)
panic("SELinux: No initial security operations\n");
if (register_security(&selinux_ops))
panic("SELinux: Unable to register with kernel.\n");
......@@ -5835,8 +5827,7 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
/* Reset security_ops to the secondary module, dummy or capability. */
security_ops = secondary_ops;
reset_security_ops();
/* Try to destroy the avc node cache */
avc_disable();
......
......@@ -57,7 +57,6 @@
struct netlbl_lsm_secattr;
extern int selinux_enabled;
extern int selinux_mls_enabled;
/* Policy capabilities */
enum {
......@@ -80,6 +79,8 @@ extern int selinux_policycap_openperm;
/* limitation of boundary depth */
#define POLICYDB_BOUNDS_MAXDEPTH 4
int security_mls_enabled(void);
int security_load_policy(void *data, size_t len);
int security_policycap_supported(unsigned int req_cap);
......@@ -96,13 +97,11 @@ struct av_decision {
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
int security_compute_av(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd);
void security_compute_av(u32 ssid, u32 tsid,
u16 tclass, struct av_decision *avd);
int security_compute_av_user(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd);
void security_compute_av_user(u32 ssid, u32 tsid,
u16 tclass, struct av_decision *avd);
int security_transition_sid(u32 ssid, u32 tsid,
u16 tclass, u32 *out_sid);
......
......@@ -282,7 +282,8 @@ static ssize_t sel_read_mls(struct file *filp, char __user *buf,
char tmpbuf[TMPBUFLEN];
ssize_t length;
length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_mls_enabled);
length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
security_mls_enabled());
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
......@@ -494,7 +495,6 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
char *scon, *tcon;
u32 ssid, tsid;
u16 tclass;
u32 req;
struct av_decision avd;
ssize_t length;
......@@ -512,7 +512,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
goto out;
length = -EINVAL;
if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out2;
length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
......@@ -522,9 +522,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (length < 0)
goto out2;
length = security_compute_av_user(ssid, tsid, tclass, req, &avd);
if (length < 0)
goto out2;
security_compute_av_user(ssid, tsid, tclass, &avd);
length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
"%x %x %x %x %u %x",
......@@ -979,6 +977,8 @@ static int sel_make_bools(void)
u32 sid;
/* remove any existing files */
for (i = 0; i < bool_num; i++)
kfree(bool_pending_names[i]);
kfree(bool_pending_names);
kfree(bool_pending_values);
bool_pending_names = NULL;
......
......@@ -41,9 +41,6 @@ static inline int mls_context_cpy(struct context *dst, struct context *src)
{
int rc;
if (!selinux_mls_enabled)
return 0;
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
......@@ -64,9 +61,6 @@ static inline int mls_context_cpy_low(struct context *dst, struct context *src)
{
int rc;
if (!selinux_mls_enabled)
return 0;
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
......@@ -82,9 +76,6 @@ out:
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
if (!selinux_mls_enabled)
return 1;
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
(c1->range.level[1].sens == c2->range.level[1].sens) &&
......@@ -93,9 +84,6 @@ static inline int mls_context_cmp(struct context *c1, struct context *c2)
static inline void mls_context_destroy(struct context *c)
{
if (!selinux_mls_enabled)
return;
ebitmap_destroy(&c->range.level[0].cat);
ebitmap_destroy(&c->range.level[1].cat);
mls_context_init(c);
......
......@@ -39,7 +39,7 @@ int mls_compute_context_len(struct context *context)
struct ebitmap *e;
struct ebitmap_node *node;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
......@@ -93,7 +93,7 @@ void mls_sid_to_context(struct context *context,
struct ebitmap *e;
struct ebitmap_node *node;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return;
scontextp = *scontext;
......@@ -200,7 +200,7 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
{
struct user_datum *usrdatum;
if (!selinux_mls_enabled)
if (!p->mls_enabled)
return 1;
if (!mls_range_isvalid(p, &c->range))
......@@ -253,7 +253,7 @@ int mls_context_to_sid(struct policydb *pol,
struct cat_datum *catdatum, *rngdatum;
int l, rc = -EINVAL;
if (!selinux_mls_enabled) {
if (!pol->mls_enabled) {
if (def_sid != SECSID_NULL && oldc)
*scontext += strlen(*scontext)+1;
return 0;
......@@ -387,7 +387,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
char *tmpstr, *freestr;
int rc;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return -EINVAL;
/* we need freestr because mls_context_to_sid will change
......@@ -407,7 +407,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
/*
* Copies the MLS range `range' into `context'.
*/
static inline int mls_range_set(struct context *context,
int mls_range_set(struct context *context,
struct mls_range *range)
{
int l, rc = 0;
......@@ -427,7 +427,7 @@ static inline int mls_range_set(struct context *context,
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon)
{
if (selinux_mls_enabled) {
if (policydb.mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
......@@ -477,7 +477,7 @@ int mls_convert_context(struct policydb *oldp,
struct ebitmap_node *node;
int l, i;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
......@@ -513,23 +513,21 @@ int mls_compute_sid(struct context *scontext,
u32 specified,
struct context *newcontext)
{
struct range_trans *rtr;
struct range_trans rtr;
struct mls_range *r;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return 0;
switch (specified) {
case AVTAB_TRANSITION:
/* Look for a range transition rule. */
for (rtr = policydb.range_tr; rtr; rtr = rtr->next) {
if (rtr->source_type == scontext->type &&
rtr->target_type == tcontext->type &&
rtr->target_class == tclass) {
/* Set the range from the rule */
return mls_range_set(newcontext,
&rtr->target_range);
}
}
rtr.source_type = scontext->type;
rtr.target_type = tcontext->type;
rtr.target_class = tclass;
r = hashtab_search(policydb.range_tr, &rtr);
if (r)
return mls_range_set(newcontext, r);
/* Fallthrough */
case AVTAB_CHANGE:
if (tclass == policydb.process_class)
......@@ -541,8 +539,8 @@ int mls_compute_sid(struct context *scontext,
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
default:
return -EINVAL;
/* fall through */
}
return -EINVAL;
}
......@@ -561,7 +559,7 @@ int mls_compute_sid(struct context *scontext,
void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
......@@ -581,7 +579,7 @@ void mls_export_netlbl_lvl(struct context *context,
void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
......@@ -603,7 +601,7 @@ int mls_export_netlbl_cat(struct context *context,
{
int rc;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
......@@ -631,7 +629,7 @@ int mls_import_netlbl_cat(struct context *context,
{
int rc;
if (!selinux_mls_enabled)
if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
......
......@@ -39,6 +39,8 @@ int mls_context_to_sid(struct policydb *p,
int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
int mls_range_set(struct context *context, struct mls_range *range);
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
struct context *context);
......
......@@ -15,6 +15,7 @@
#define _SS_MLS_TYPES_H_
#include "security.h"
#include "ebitmap.h"
struct mls_level {
u32 sens; /* sensitivity */
......@@ -27,18 +28,12 @@ struct mls_range {
static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
{
if (!selinux_mls_enabled)
return 1;
return ((l1->sens == l2->sens) &&
ebitmap_cmp(&l1->cat, &l2->cat));
}
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
if (!selinux_mls_enabled)
return 1;
return ((l1->sens >= l2->sens) &&
ebitmap_contains(&l1->cat, &l2->cat));
}
......
......@@ -52,8 +52,6 @@ static char *symtab_name[SYM_NUM] = {
};
#endif
int selinux_mls_enabled;
static unsigned int symtab_sizes[SYM_NUM] = {
2,
32,
......@@ -177,6 +175,21 @@ out_free_role:
goto out;
}
static u32 rangetr_hash(struct hashtab *h, const void *k)
{
const struct range_trans *key = k;
return (key->source_type + (key->target_type << 3) +
(key->target_class << 5)) & (h->size - 1);
}
static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
{
const struct range_trans *key1 = k1, *key2 = k2;
return (key1->source_type != key2->source_type ||
key1->target_type != key2->target_type ||
key1->target_class != key2->target_class);
}
/*
* Initialize a policy database structure.
*/
......@@ -204,6 +217,10 @@ static int policydb_init(struct policydb *p)
if (rc)
goto out_free_symtab;
p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
if (!p->range_tr)
goto out_free_symtab;
ebitmap_init(&p->policycaps);
ebitmap_init(&p->permissive_map);
......@@ -408,6 +425,20 @@ static void symtab_hash_eval(struct symtab *s)
info.slots_used, h->size, info.max_chain_len);
}
}
static void rangetr_hash_eval(struct hashtab *h)
{
struct hashtab_info info;
hashtab_stat(h, &info);
printk(KERN_DEBUG "SELinux: rangetr: %d entries and %d/%d buckets used, "
"longest chain length %d\n", h->nel,
info.slots_used, h->size, info.max_chain_len);
}
#else
static inline void rangetr_hash_eval(struct hashtab *h)
{
}
#endif
/*
......@@ -422,7 +453,7 @@ static int policydb_index_others(struct policydb *p)
printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
if (selinux_mls_enabled)
if (p->mls_enabled)
printk(", %d sens, %d cats", p->p_levels.nprim,
p->p_cats.nprim);
printk("\n");
......@@ -612,6 +643,17 @@ static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
cat_destroy,
};
static int range_tr_destroy(void *key, void *datum, void *p)
{
struct mls_range *rt = datum;
kfree(key);
ebitmap_destroy(&rt->level[0].cat);
ebitmap_destroy(&rt->level[1].cat);
kfree(datum);
cond_resched();
return 0;
}
static void ocontext_destroy(struct ocontext *c, int i)
{
context_destroy(&c->context[0]);
......@@ -632,7 +674,6 @@ void policydb_destroy(struct policydb *p)
int i;
struct role_allow *ra, *lra = NULL;
struct role_trans *tr, *ltr = NULL;
struct range_trans *rt, *lrt = NULL;
for (i = 0; i < SYM_NUM; i++) {
cond_resched();
......@@ -693,20 +734,8 @@ void policydb_destroy(struct policydb *p)
}
kfree(lra);
for (rt = p->range_tr; rt; rt = rt->next) {
cond_resched();
if (lrt) {
ebitmap_destroy(&lrt->target_range.level[0].cat);
ebitmap_destroy(&lrt->target_range.level[1].cat);
kfree(lrt);
}
lrt = rt;
}
if (lrt) {
ebitmap_destroy(&lrt->target_range.level[0].cat);
ebitmap_destroy(&lrt->target_range.level[1].cat);
kfree(lrt);
}
hashtab_map(p->range_tr, range_tr_destroy, NULL);
hashtab_destroy(p->range_tr);
if (p->type_attr_map) {
for (i = 0; i < p->p_types.nprim; i++)
......@@ -1686,12 +1715,11 @@ int policydb_read(struct policydb *p, void *fp)
int i, j, rc;
__le32 buf[4];
u32 nodebuf[8];
u32 len, len2, config, nprim, nel, nel2;
u32 len, len2, nprim, nel, nel2;
char *policydb_str;
struct policydb_compat_info *info;
struct range_trans *rt, *lrt;
config = 0;
struct range_trans *rt;
struct mls_range *r;
rc = policydb_init(p);
if (rc)
......@@ -1740,7 +1768,7 @@ int policydb_read(struct policydb *p, void *fp)
kfree(policydb_str);
policydb_str = NULL;
/* Read the version, config, and table sizes. */
/* Read the version and table sizes. */
rc = next_entry(buf, fp, sizeof(u32)*4);
if (rc < 0)
goto bad;
......@@ -1755,13 +1783,7 @@ int policydb_read(struct policydb *p, void *fp)
}
if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
if (ss_initialized && !selinux_mls_enabled) {
printk(KERN_ERR "SELinux: Cannot switch between non-MLS"
" and MLS policies\n");
goto bad;
}
selinux_mls_enabled = 1;
config |= POLICYDB_CONFIG_MLS;
p->mls_enabled = 1;
if (p->policyvers < POLICYDB_VERSION_MLS) {
printk(KERN_ERR "SELinux: security policydb version %d "
......@@ -1769,12 +1791,6 @@ int policydb_read(struct policydb *p, void *fp)
p->policyvers);
goto bad;
}
} else {
if (ss_initialized && selinux_mls_enabled) {
printk(KERN_ERR "SELinux: Cannot switch between MLS and"
" non-MLS policies\n");
goto bad;
}
}
p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
......@@ -2122,44 +2138,61 @@ int policydb_read(struct policydb *p, void *fp)
if (rc < 0)
goto bad;
nel = le32_to_cpu(buf[0]);
lrt = NULL;
for (i = 0; i < nel; i++) {
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt) {
rc = -ENOMEM;
goto bad;
}
if (lrt)
lrt->next = rt;
else
p->range_tr = rt;
rc = next_entry(buf, fp, (sizeof(u32) * 2));
if (rc < 0)
if (rc < 0) {
kfree(rt);
goto bad;
}
rt->source_type = le32_to_cpu(buf[0]);
rt->target_type = le32_to_cpu(buf[1]);
if (new_rangetr) {
rc = next_entry(buf, fp, sizeof(u32));
if (rc < 0)
if (rc < 0) {
kfree(rt);
goto bad;
}
rt->target_class = le32_to_cpu(buf[0]);
} else
rt->target_class = p->process_class;
if (!policydb_type_isvalid(p, rt->source_type) ||
!policydb_type_isvalid(p, rt->target_type) ||
!policydb_class_isvalid(p, rt->target_class)) {
kfree(rt);
rc = -EINVAL;
goto bad;
}
rc = mls_read_range_helper(&rt->target_range, fp);
if (rc)
r = kzalloc(sizeof(*r), GFP_KERNEL);
if (!r) {
kfree(rt);
rc = -ENOMEM;
goto bad;
if (!mls_range_isvalid(p, &rt->target_range)) {
}
rc = mls_read_range_helper(r, fp);
if (rc) {
kfree(rt);
kfree(r);
goto bad;
}
if (!mls_range_isvalid(p, r)) {
printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
kfree(rt);
kfree(r);
goto bad;
}
rc = hashtab_insert(p->range_tr, rt, r);
if (rc) {
kfree(rt);
kfree(r);
goto bad;
}
lrt = rt;
}
rangetr_hash_eval(p->range_tr);
}
p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL);
......
......@@ -27,6 +27,8 @@
#include "symtab.h"
#include "avtab.h"
#include "sidtab.h"
#include "ebitmap.h"
#include "mls_types.h"
#include "context.h"
#include "constraint.h"
......@@ -113,8 +115,6 @@ struct range_trans {
u32 source_type;
u32 target_type;
u32 target_class;
struct mls_range target_range;
struct range_trans *next;
};
/* Boolean data type */
......@@ -187,6 +187,8 @@ struct genfs {
/* The policy database */
struct policydb {
int mls_enabled;
/* symbol tables */
struct symtab symtab[SYM_NUM];
#define p_commons symtab[SYM_COMMONS]
......@@ -240,8 +242,8 @@ struct policydb {
fixed labeling behavior. */
struct genfs *genfs;
/* range transitions */
struct range_trans *range_tr;
/* range transitions table (range_trans_key -> mls_range) */
struct hashtab *range_tr;
/* type -> attribute reverse mapping */
struct ebitmap *type_attr_map;
......
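With this change a range transition is found by hashing the (source type, target type, target class) triple instead of walking a linked list. A minimal userspace sketch of the same keyed lookup, reusing the formula from rangetr_hash() (illustrative structures and a fixed chaining table, not the kernel's hashtab API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSLOTS 256	/* power of two, so & (NSLOTS - 1) works as a mask */

struct rangetr_key { uint32_t source_type, target_type, target_class; };
struct rangetr_node {
	struct rangetr_key key;
	const char *range;	/* stand-in for struct mls_range */
	struct rangetr_node *next;
};
static struct rangetr_node *table[NSLOTS];

static unsigned rangetr_hash(const struct rangetr_key *k)
{
	return (k->source_type + (k->target_type << 3) +
		(k->target_class << 5)) & (NSLOTS - 1);
}

static void rangetr_insert(struct rangetr_key key, const char *range)
{
	struct rangetr_node *n = calloc(1, sizeof(*n));
	unsigned slot = rangetr_hash(&key);

	n->key = key;
	n->range = range;
	n->next = table[slot];
	table[slot] = n;
}

static const char *rangetr_lookup(struct rangetr_key key)
{
	for (struct rangetr_node *n = table[rangetr_hash(&key)]; n; n = n->next)
		if (!memcmp(&n->key, &key, sizeof(key)))
			return n->range;
	return NULL;
}

int main(void)
{
	rangetr_insert((struct rangetr_key){ 10, 20, 2 }, "s0-s0:c0.c3");
	printf("%s\n", rangetr_lookup((struct rangetr_key){ 10, 20, 2 }));
	return 0;
}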
......@@ -26,6 +26,10 @@
*
* Added support for bounds domain and audit messaged on masked permissions
*
* Updated: Guido Trentalancia <guido@trentalancia.com>
*
* Added support for runtime switching of the policy type
*
* Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
......@@ -87,11 +91,10 @@ static u32 latest_granting;
static int context_struct_to_string(struct context *context, char **scontext,
u32 *scontext_len);
static int context_struct_compute_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 requested,
struct av_decision *avd);
static void context_struct_compute_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
struct av_decision *avd);
struct selinux_mapping {
u16 value; /* policy value */
......@@ -196,23 +199,6 @@ static u16 unmap_class(u16 tclass)
return tclass;
}
static u32 unmap_perm(u16 tclass, u32 tperm)
{
if (tclass < current_mapping_size) {
unsigned i;
u32 kperm = 0;
for (i = 0; i < current_mapping[tclass].num_perms; i++)
if (tperm & (1<<i)) {
kperm |= current_mapping[tclass].perms[i];
tperm &= ~(1<<i);
}
return kperm;
}
return tperm;
}
static void map_decision(u16 tclass, struct av_decision *avd,
int allow_unknown)
{
......@@ -250,6 +236,10 @@ static void map_decision(u16 tclass, struct av_decision *avd,
}
}
int security_mls_enabled(void)
{
return policydb.mls_enabled;
}
/*
* Return the boolean value of a constraint expression
......@@ -465,7 +455,8 @@ static void security_dump_masked_av(struct context *scontext,
char *scontext_name = NULL;
char *tcontext_name = NULL;
char *permission_names[32];
int index, length;
int index;
u32 length;
bool need_comma = false;
if (!permissions)
......@@ -532,7 +523,6 @@ out:
static void type_attribute_bounds_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 requested,
struct av_decision *avd)
{
struct context lo_scontext;
......@@ -553,7 +543,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
tcontext,
tclass,
requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
......@@ -569,7 +558,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(scontext,
&lo_tcontext,
tclass,
requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
......@@ -586,7 +574,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
&lo_tcontext,
tclass,
requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
......@@ -607,11 +594,10 @@ static void type_attribute_bounds_av(struct context *scontext,
* Compute access vectors based on a context structure pair for
* the permissions in a particular class.
*/
static int context_struct_compute_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 requested,
struct av_decision *avd)
static void context_struct_compute_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
struct av_decision *avd)
{
struct constraint_node *constraint;
struct role_allow *ra;
......@@ -622,19 +608,14 @@ static int context_struct_compute_av(struct context *scontext,
struct ebitmap_node *snode, *tnode;
unsigned int i, j;
/*
* Initialize the access vectors to the default values.
*/
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
avd->seqno = latest_granting;
avd->flags = 0;
if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
if (printk_ratelimit())
printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
return -EINVAL;
return;
}
tclass_datum = policydb.class_val_to_struct[tclass - 1];
......@@ -705,9 +686,7 @@ static int context_struct_compute_av(struct context *scontext,
* permission and notice it to userspace via audit.
*/
type_attribute_bounds_av(scontext, tcontext,
tclass, requested, avd);
return 0;
tclass, avd);
}
static int security_validtrans_handle_fail(struct context *ocontext,
......@@ -864,7 +843,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
if (rc) {
char *old_name = NULL;
char *new_name = NULL;
int length;
u32 length;
if (!context_struct_to_string(old_context,
&old_name, &length) &&
......@@ -886,110 +865,116 @@ out:
return rc;
}
static int security_compute_av_core(u32 ssid,
u32 tsid,
u16 tclass,
u32 requested,
struct av_decision *avd)
static void avd_init(struct av_decision *avd)
{
struct context *scontext = NULL, *tcontext = NULL;
int rc = 0;
scontext = sidtab_search(&sidtab, ssid);
if (!scontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
return -EINVAL;
}
tcontext = sidtab_search(&sidtab, tsid);
if (!tcontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
return -EINVAL;
}
rc = context_struct_compute_av(scontext, tcontext, tclass,
requested, avd);
/* permissive domain? */
if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
return rc;
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
avd->seqno = latest_granting;
avd->flags = 0;
}
/**
* security_compute_av - Compute access vector decisions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions
* @avd: access vector decisions
*
* Compute a set of access vector decisions based on the
* SID pair (@ssid, @tsid) for the permissions in @tclass.
* Return -%EINVAL if any of the parameters are invalid or %0
* if the access vector decisions were computed successfully.
*/
int security_compute_av(u32 ssid,
u32 tsid,
u16 orig_tclass,
u32 orig_requested,
struct av_decision *avd)
void security_compute_av(u32 ssid,
u32 tsid,
u16 orig_tclass,
struct av_decision *avd)
{
u16 tclass;
u32 requested;
int rc;
struct context *scontext = NULL, *tcontext = NULL;
read_lock(&policy_rwlock);
avd_init(avd);
if (!ss_initialized)
goto allow;
requested = unmap_perm(orig_tclass, orig_requested);
scontext = sidtab_search(&sidtab, ssid);
if (!scontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
/* permissive domain? */
if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
tcontext = sidtab_search(&sidtab, tsid);
if (!tcontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
tclass = unmap_class(orig_tclass);
if (unlikely(orig_tclass && !tclass)) {
if (policydb.allow_unknown)
goto allow;
rc = -EINVAL;
goto out;
}
rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
context_struct_compute_av(scontext, tcontext, tclass, avd);
map_decision(orig_tclass, avd, policydb.allow_unknown);
out:
read_unlock(&policy_rwlock);
return rc;
return;
allow:
avd->allowed = 0xffffffff;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
avd->seqno = latest_granting;
avd->flags = 0;
rc = 0;
goto out;
}
int security_compute_av_user(u32 ssid,
u32 tsid,
u16 tclass,
u32 requested,
struct av_decision *avd)
void security_compute_av_user(u32 ssid,
u32 tsid,
u16 tclass,
struct av_decision *avd)
{
int rc;
struct context *scontext = NULL, *tcontext = NULL;
if (!ss_initialized) {
avd->allowed = 0xffffffff;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
avd->seqno = latest_granting;
return 0;
read_lock(&policy_rwlock);
avd_init(avd);
if (!ss_initialized)
goto allow;
scontext = sidtab_search(&sidtab, ssid);
if (!scontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
goto out;
}
read_lock(&policy_rwlock);
rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
/* permissive domain? */
if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
avd->flags |= AVD_FLAGS_PERMISSIVE;
tcontext = sidtab_search(&sidtab, tsid);
if (!tcontext) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
goto out;
}
if (unlikely(!tclass)) {
if (policydb.allow_unknown)
goto allow;
goto out;
}
context_struct_compute_av(scontext, tcontext, tclass, avd);
out:
read_unlock(&policy_rwlock);
return rc;
return;
allow:
avd->allowed = 0xffffffff;
goto out;
}
/*
......@@ -1565,7 +1550,10 @@ static int clone_sid(u32 sid,
{
struct sidtab *s = arg;
return sidtab_insert(s, sid, context);
if (sid > SECINITSID_NUM)
return sidtab_insert(s, sid, context);
else
return 0;
}
static inline int convert_context_handle_invalid_context(struct context *context)
......@@ -1606,12 +1594,17 @@ static int convert_context(u32 key,
{
struct convert_context_args *args;
struct context oldc;
struct ocontext *oc;
struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
int rc;
int rc = 0;
if (key <= SECINITSID_NUM)
goto out;
args = p;
......@@ -1673,9 +1666,39 @@ static int convert_context(u32 key,
goto bad;
c->type = typdatum->value;
rc = mls_convert_context(args->oldp, args->newp, c);
if (rc)
goto bad;
/* Convert the MLS fields if dealing with MLS policies */
if (args->oldp->mls_enabled && args->newp->mls_enabled) {
rc = mls_convert_context(args->oldp, args->newp, c);
if (rc)
goto bad;
} else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
/*
* Switching between MLS and non-MLS policy:
* free any storage used by the MLS fields in the
* context for all existing entries in the sidtab.
*/
mls_context_destroy(c);
} else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
/*
* Switching between non-MLS and MLS policy:
* ensure that the MLS fields of the context for all
* existing entries in the sidtab are filled in with a
* suitable default value, likely taken from one of the
* initial SIDs.
*/
oc = args->newp->ocontexts[OCON_ISID];
while (oc && oc->sid[0] != SECINITSID_UNLABELED)
oc = oc->next;
if (!oc) {
printk(KERN_ERR "SELinux: unable to look up"
" the initial SIDs list\n");
goto bad;
}
range = &oc->context[0].range;
rc = mls_range_set(c, range);
if (rc)
goto bad;
}
/* Check the validity of the new context. */
if (!policydb_context_isvalid(args->newp, c)) {
......@@ -1771,9 +1794,17 @@ int security_load_policy(void *data, size_t len)
if (policydb_read(&newpolicydb, fp))
return -EINVAL;
if (sidtab_init(&newsidtab)) {
/* If switching between different policy types, log MLS status */
if (policydb.mls_enabled && !newpolicydb.mls_enabled)
printk(KERN_INFO "SELinux: Disabling MLS support...\n");
else if (!policydb.mls_enabled && newpolicydb.mls_enabled)
printk(KERN_INFO "SELinux: Enabling MLS support...\n");
rc = policydb_load_isids(&newpolicydb, &newsidtab);
if (rc) {
printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
policydb_destroy(&newpolicydb);
return -ENOMEM;
return rc;
}
if (selinux_set_mapping(&newpolicydb, secclass_map,
......@@ -1800,8 +1831,12 @@ int security_load_policy(void *data, size_t len)
args.oldp = &policydb;
args.newp = &newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
if (rc)
if (rc) {
printk(KERN_ERR "SELinux: unable to convert the internal"
" representation of contexts in the new SID"
" table\n");
goto err;
}
/* Save the old policydb and SID table to free later. */
memcpy(&oldpolicydb, &policydb, sizeof policydb);
......@@ -2397,7 +2432,7 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
u32 len;
int rc = 0;
if (!ss_initialized || !selinux_mls_enabled) {
if (!ss_initialized || !policydb.mls_enabled) {
*new_sid = sid;
goto out;
}
......@@ -2498,7 +2533,7 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
/* we don't need to check ss_initialized here since the only way both
* nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
* security server was initialized and ss_initialized was true */
if (!selinux_mls_enabled) {
if (!policydb.mls_enabled) {
*peer_sid = SECSID_NULL;
return 0;
}
......@@ -2555,7 +2590,7 @@ int security_get_classes(char ***classes, int *nclasses)
read_lock(&policy_rwlock);
*nclasses = policydb.p_classes.nprim;
*classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC);
*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
......@@ -2602,7 +2637,7 @@ int security_get_permissions(char *class, char ***perms, int *nperms)
}
*nperms = match->permissions.nprim;
*perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC);
*perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
if (!*perms)
goto out;
......
......@@ -157,12 +157,12 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
*
* Returns 0 on success, error code otherwise.
*/
static int smack_syslog(int type)
static int smack_syslog(int type, bool from_file)
{
int rc;
char *sp = current_security();
rc = cap_syslog(type);
rc = cap_syslog(type, from_file);
if (rc != 0)
return rc;
......
obj-y = common.o realpath.o tomoyo.o domain.o file.o
obj-y = common.o realpath.o tomoyo.o domain.o file.o gc.o
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.
/*
* security/tomoyo/gc.c
*
* Implementation of the Domain-Based Mandatory Access Control.
*
* Copyright (C) 2005-2010 NTT DATA CORPORATION
*
*/
#include "common.h"
#include <linux/kthread.h>
enum tomoyo_gc_id {
TOMOYO_ID_DOMAIN_INITIALIZER,
TOMOYO_ID_DOMAIN_KEEPER,
TOMOYO_ID_ALIAS,
TOMOYO_ID_GLOBALLY_READABLE,
TOMOYO_ID_PATTERN,
TOMOYO_ID_NO_REWRITE,
TOMOYO_ID_MANAGER,
TOMOYO_ID_NAME,
TOMOYO_ID_ACL,
TOMOYO_ID_DOMAIN
};
struct tomoyo_gc_entry {
struct list_head list;
int type;
void *element;
};
static LIST_HEAD(tomoyo_gc_queue);
static DEFINE_MUTEX(tomoyo_gc_mutex);
/* Caller holds tomoyo_policy_lock mutex. */
static bool tomoyo_add_to_gc(const int type, void *element)
{
struct tomoyo_gc_entry *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return false;
entry->type = type;
entry->element = element;
list_add(&entry->list, &tomoyo_gc_queue);
return true;
}
static void tomoyo_del_allow_read
(struct tomoyo_globally_readable_file_entry *ptr)
{
tomoyo_put_name(ptr->filename);
}
static void tomoyo_del_file_pattern(struct tomoyo_pattern_entry *ptr)
{
tomoyo_put_name(ptr->pattern);
}
static void tomoyo_del_no_rewrite(struct tomoyo_no_rewrite_entry *ptr)
{
tomoyo_put_name(ptr->pattern);
}
static void tomoyo_del_domain_initializer
(struct tomoyo_domain_initializer_entry *ptr)
{
tomoyo_put_name(ptr->domainname);
tomoyo_put_name(ptr->program);
}
static void tomoyo_del_domain_keeper(struct tomoyo_domain_keeper_entry *ptr)
{
tomoyo_put_name(ptr->domainname);
tomoyo_put_name(ptr->program);
}
static void tomoyo_del_alias(struct tomoyo_alias_entry *ptr)
{
tomoyo_put_name(ptr->original_name);
tomoyo_put_name(ptr->aliased_name);
}
static void tomoyo_del_manager(struct tomoyo_policy_manager_entry *ptr)
{
tomoyo_put_name(ptr->manager);
}
static void tomoyo_del_acl(struct tomoyo_acl_info *acl)
{
switch (acl->type) {
case TOMOYO_TYPE_PATH_ACL:
{
struct tomoyo_path_acl *entry
= container_of(acl, typeof(*entry), head);
tomoyo_put_name(entry->filename);
}
break;
case TOMOYO_TYPE_PATH2_ACL:
{
struct tomoyo_path2_acl *entry
= container_of(acl, typeof(*entry), head);
tomoyo_put_name(entry->filename1);
tomoyo_put_name(entry->filename2);
}
break;
default:
printk(KERN_WARNING "Unknown type\n");
break;
}
}
static bool tomoyo_del_domain(struct tomoyo_domain_info *domain)
{
struct tomoyo_acl_info *acl;
struct tomoyo_acl_info *tmp;
/*
* Since we don't protect whole execve() operation using SRCU,
* we need to recheck domain->users at this point.
*
* (1) Reader starts SRCU section upon execve().
* (2) Reader traverses tomoyo_domain_list and finds this domain.
* (3) Writer marks this domain as deleted.
* (4) Garbage collector removes this domain from tomoyo_domain_list
* because this domain is marked as deleted and used by nobody.
* (5) Reader saves reference to this domain into
* "struct linux_binprm"->cred->security .
* (6) Reader finishes SRCU section, although execve() operation has
* not finished yet.
* (7) Garbage collector waits for SRCU synchronization.
* (8) Garbage collector kfree() this domain because this domain is
* used by nobody.
* (9) Reader finishes execve() operation and restores this domain from
* "struct linux_binprm"->cred->security.
*
* By updating domain->users at (5), we can solve this race problem
* by rechecking domain->users at (8).
*/
if (atomic_read(&domain->users))
return false;
list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
tomoyo_del_acl(acl);
tomoyo_memory_free(acl);
}
tomoyo_put_name(domain->domainname);
return true;
}
static void tomoyo_del_name(const struct tomoyo_name_entry *ptr)
{
}
static void tomoyo_collect_entry(void)
{
mutex_lock(&tomoyo_policy_lock);
{
struct tomoyo_globally_readable_file_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list,
list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_GLOBALLY_READABLE, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_pattern_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_PATTERN, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_no_rewrite_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_NO_REWRITE, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_domain_initializer_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list,
list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_INITIALIZER, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_domain_keeper_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN_KEEPER, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_alias_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_ALIAS, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_policy_manager_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list,
list) {
if (!ptr->is_deleted)
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_MANAGER, ptr))
list_del_rcu(&ptr->list);
else
break;
}
}
{
struct tomoyo_domain_info *domain;
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
struct tomoyo_acl_info *acl;
list_for_each_entry_rcu(acl, &domain->acl_info_list,
list) {
switch (acl->type) {
case TOMOYO_TYPE_PATH_ACL:
if (container_of(acl,
struct tomoyo_path_acl,
head)->perm ||
container_of(acl,
struct tomoyo_path_acl,
head)->perm_high)
continue;
break;
case TOMOYO_TYPE_PATH2_ACL:
if (container_of(acl,
struct tomoyo_path2_acl,
head)->perm)
continue;
break;
default:
continue;
}
if (tomoyo_add_to_gc(TOMOYO_ID_ACL, acl))
list_del_rcu(&acl->list);
else
break;
}
if (!domain->is_deleted || atomic_read(&domain->users))
continue;
/*
* Nobody is referring this domain. But somebody may
* refer this domain after successful execve().
* We recheck domain->users after SRCU synchronization.
*/
if (tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, domain))
list_del_rcu(&domain->list);
else
break;
}
}
mutex_unlock(&tomoyo_policy_lock);
mutex_lock(&tomoyo_name_list_lock);
{
int i;
for (i = 0; i < TOMOYO_MAX_HASH; i++) {
struct tomoyo_name_entry *ptr;
list_for_each_entry_rcu(ptr, &tomoyo_name_list[i],
list) {
if (atomic_read(&ptr->users))
continue;
if (tomoyo_add_to_gc(TOMOYO_ID_NAME, ptr))
list_del_rcu(&ptr->list);
else {
i = TOMOYO_MAX_HASH;
break;
}
}
}
}
mutex_unlock(&tomoyo_name_list_lock);
}
static void tomoyo_kfree_entry(void)
{
struct tomoyo_gc_entry *p;
struct tomoyo_gc_entry *tmp;
list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) {
switch (p->type) {
case TOMOYO_ID_DOMAIN_INITIALIZER:
tomoyo_del_domain_initializer(p->element);
break;
case TOMOYO_ID_DOMAIN_KEEPER:
tomoyo_del_domain_keeper(p->element);
break;
case TOMOYO_ID_ALIAS:
tomoyo_del_alias(p->element);
break;
case TOMOYO_ID_GLOBALLY_READABLE:
tomoyo_del_allow_read(p->element);
break;
case TOMOYO_ID_PATTERN:
tomoyo_del_file_pattern(p->element);
break;
case TOMOYO_ID_NO_REWRITE:
tomoyo_del_no_rewrite(p->element);
break;
case TOMOYO_ID_MANAGER:
tomoyo_del_manager(p->element);
break;
case TOMOYO_ID_NAME:
tomoyo_del_name(p->element);
break;
case TOMOYO_ID_ACL:
tomoyo_del_acl(p->element);
break;
case TOMOYO_ID_DOMAIN:
if (!tomoyo_del_domain(p->element))
continue;
break;
default:
printk(KERN_WARNING "Unknown type\n");
break;
}
tomoyo_memory_free(p->element);
list_del(&p->list);
kfree(p);
}
}
static int tomoyo_gc_thread(void *unused)
{
daemonize("GC for TOMOYO");
if (mutex_trylock(&tomoyo_gc_mutex)) {
int i;
for (i = 0; i < 10; i++) {
tomoyo_collect_entry();
if (list_empty(&tomoyo_gc_queue))
break;
synchronize_srcu(&tomoyo_ss);
tomoyo_kfree_entry();
}
mutex_unlock(&tomoyo_gc_mutex);
}
do_exit(0);
}
void tomoyo_run_gc(void)
{
struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL,
"GC for TOMOYO");
if (!IS_ERR(task))
wake_up_process(task);
}
......@@ -14,9 +14,8 @@
#include <linux/mnt_namespace.h>
#include <linux/fs_struct.h>
#include <linux/hash.h>
#include <linux/magic.h>
#include "common.h"
#include "realpath.h"
/**
* tomoyo_encode: Convert binary string to ascii string.
......@@ -112,7 +111,7 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
path_put(&ns_root);
/* Prepend "/proc" prefix if using internal proc vfs mount. */
if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
(strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
(path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
sp -= 5;
if (sp >= newname)
memcpy(sp, "/proc", 5);
......@@ -149,12 +148,12 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
*
* Returns the realpath of the given @path on success, NULL otherwise.
*
* These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
* These functions use kzalloc(), so the caller must call kfree()
* if these functions didn't return NULL.
*/
char *tomoyo_realpath_from_path(struct path *path)
{
char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer));
char *buf = kzalloc(sizeof(struct tomoyo_page_buffer), GFP_KERNEL);
BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
<= TOMOYO_MAX_PATHNAME_LEN - 1);
......@@ -163,7 +162,7 @@ char *tomoyo_realpath_from_path(struct path *path)
if (tomoyo_realpath_from_path2(path, buf,
TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
return buf;
tomoyo_free(buf);
kfree(buf);
return NULL;
}
......@@ -206,98 +205,47 @@ char *tomoyo_realpath_nofollow(const char *pathname)
}
/* Memory allocated for non-string data. */
static unsigned int tomoyo_allocated_memory_for_elements;
/* Quota for holding non-string data. */
static unsigned int tomoyo_quota_for_elements;
static atomic_t tomoyo_policy_memory_size;
/* Quota for holding policy. */
static unsigned int tomoyo_quota_for_policy;
/**
* tomoyo_alloc_element - Allocate permanent memory for structures.
* tomoyo_memory_ok - Check memory quota.
*
* @size: Size in bytes.
* @ptr: Pointer to allocated memory.
*
* Returns pointer to allocated memory on success, NULL otherwise.
* Returns true on success, false otherwise.
*
* Memory has to be zeroed.
* The RAM is chunked, so NEVER try to kfree() the returned pointer.
* Caller holds tomoyo_policy_lock.
 * Memory pointed to by @ptr will be zeroed on success.
*/
void *tomoyo_alloc_element(const unsigned int size)
bool tomoyo_memory_ok(void *ptr)
{
static char *buf;
static DEFINE_MUTEX(lock);
static unsigned int buf_used_len = PATH_MAX;
char *ptr = NULL;
	/* Assumes sizeof(void *) >= sizeof(long) is true. */
const unsigned int word_aligned_size
= roundup(size, max(sizeof(void *), sizeof(long)));
if (word_aligned_size > PATH_MAX)
return NULL;
mutex_lock(&lock);
if (buf_used_len + word_aligned_size > PATH_MAX) {
if (!tomoyo_quota_for_elements ||
tomoyo_allocated_memory_for_elements
+ PATH_MAX <= tomoyo_quota_for_elements)
ptr = kzalloc(PATH_MAX, GFP_KERNEL);
if (!ptr) {
printk(KERN_WARNING "ERROR: Out of memory "
"for tomoyo_alloc_element().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
} else {
buf = ptr;
tomoyo_allocated_memory_for_elements += PATH_MAX;
buf_used_len = word_aligned_size;
ptr = buf;
}
} else if (word_aligned_size) {
int i;
ptr = buf + buf_used_len;
buf_used_len += word_aligned_size;
for (i = 0; i < word_aligned_size; i++) {
if (!ptr[i])
continue;
printk(KERN_ERR "WARNING: Reserved memory was tainted! "
"The system might go wrong.\n");
ptr[i] = '\0';
}
int allocated_len = ptr ? ksize(ptr) : 0;
atomic_add(allocated_len, &tomoyo_policy_memory_size);
if (ptr && (!tomoyo_quota_for_policy ||
atomic_read(&tomoyo_policy_memory_size)
<= tomoyo_quota_for_policy)) {
memset(ptr, 0, allocated_len);
return true;
}
mutex_unlock(&lock);
return ptr;
printk(KERN_WARNING "ERROR: Out of memory "
"for tomoyo_alloc_element().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
return false;
}
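
With the chunked allocator gone, callers are expected to allocate policy structures with plain kzalloc() and then ask tomoyo_memory_ok() whether the allocation may be kept under the configured quota; on failure the caller frees the buffer itself, and tomoyo_memory_free() below is the matching release path once an entry is retired. A hedged sketch of that caller pattern follows; "struct example_entry" and its initialization are placeholders, not part of this patch.

/* Illustrative caller pattern; "struct example_entry" is a placeholder. */
static struct example_entry *example_alloc_entry(void)
{
	struct example_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!tomoyo_memory_ok(entry)) {
		/* Allocation failed or quota exceeded: back out. */
		kfree(entry);
		return NULL;
	}
	/* ... fill in and link the entry under tomoyo_policy_lock ... */
	return entry;
	/* When retired later: unlink, wait for readers, tomoyo_memory_free(). */
}
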
/* Memory allocated for string data in bytes. */
static unsigned int tomoyo_allocated_memory_for_savename;
/* Quota for holding string data in bytes. */
static unsigned int tomoyo_quota_for_savename;
/*
* TOMOYO uses this hash only when appending a string into the string
* table. Frequency of appending strings is very low. So we don't need
* large (e.g. 64k) hash size. 256 will be sufficient.
*/
#define TOMOYO_HASH_BITS 8
#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
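
For reference (illustrative, not part of this patch): a pathname is mapped to its tomoyo_name_list bucket by hashing the string once and folding the result into TOMOYO_HASH_BITS bits, exactly as the lookup in tomoyo_get_name() below does.

/* Illustrative helper: which bucket a given pathname would land in. */
static struct list_head *example_name_bucket(const char *name)
{
	const unsigned int hash =
		full_name_hash((const unsigned char *) name, strlen(name));

	return &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
}
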
/*
* tomoyo_name_entry is a structure which is used for linking
* "struct tomoyo_path_info" into tomoyo_name_list .
/**
* tomoyo_memory_free - Free memory for elements.
*
* Since tomoyo_name_list manages a list of strings which are shared by
* multiple processes (whereas "struct tomoyo_path_info" inside
* "struct tomoyo_path_info_with_data" is not shared), a reference counter will
* be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info"
 * when TOMOYO starts supporting a garbage collector.
* @ptr: Pointer to allocated memory.
*/
struct tomoyo_name_entry {
struct list_head list;
struct tomoyo_path_info entry;
};
/* Structure for available memory region. */
struct tomoyo_free_memory_block_list {
struct list_head list;
char *ptr; /* Pointer to a free area. */
int len; /* Length of the area. */
};
void tomoyo_memory_free(void *ptr)
{
atomic_sub(ksize(ptr), &tomoyo_policy_memory_size);
kfree(ptr);
}
/*
* tomoyo_name_list is used for holding string data used by TOMOYO.
......@@ -305,87 +253,58 @@ struct tomoyo_free_memory_block_list {
* "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
* "const struct tomoyo_path_info *".
*/
static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
/* Lock for protecting tomoyo_name_list . */
DEFINE_MUTEX(tomoyo_name_list_lock);
/**
* tomoyo_save_name - Allocate permanent memory for string data.
* tomoyo_get_name - Allocate permanent memory for string data.
*
 * @name: The string to store into the permanent memory.
*
* Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
*
* The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
const struct tomoyo_path_info *tomoyo_save_name(const char *name)
const struct tomoyo_path_info *tomoyo_get_name(const char *name)
{
static LIST_HEAD(fmb_list);
static DEFINE_MUTEX(lock);
struct tomoyo_name_entry *ptr;
unsigned int hash;
/* fmb contains available size in bytes.
fmb is removed from the fmb_list when fmb->len becomes 0. */
struct tomoyo_free_memory_block_list *fmb;
int len;
char *cp;
int allocated_len;
struct list_head *head;
if (!name)
return NULL;
len = strlen(name) + 1;
if (len > TOMOYO_MAX_PATHNAME_LEN) {
printk(KERN_WARNING "ERROR: Name too long "
"for tomoyo_save_name().\n");
return NULL;
}
hash = full_name_hash((const unsigned char *) name, len - 1);
head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
mutex_lock(&lock);
mutex_lock(&tomoyo_name_list_lock);
list_for_each_entry(ptr, head, list) {
if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
goto out;
}
list_for_each_entry(fmb, &fmb_list, list) {
if (len <= fmb->len)
goto ready;
if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name))
continue;
atomic_inc(&ptr->users);
goto out;
}
if (!tomoyo_quota_for_savename ||
tomoyo_allocated_memory_for_savename + PATH_MAX
<= tomoyo_quota_for_savename)
cp = kzalloc(PATH_MAX, GFP_KERNEL);
else
cp = NULL;
fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
if (!cp || !fmb) {
kfree(cp);
kfree(fmb);
ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
allocated_len = ptr ? ksize(ptr) : 0;
if (!ptr || (tomoyo_quota_for_policy &&
atomic_read(&tomoyo_policy_memory_size) + allocated_len
> tomoyo_quota_for_policy)) {
kfree(ptr);
printk(KERN_WARNING "ERROR: Out of memory "
"for tomoyo_save_name().\n");
"for tomoyo_get_name().\n");
if (!tomoyo_policy_loaded)
panic("MAC Initialization failed.\n");
ptr = NULL;
goto out;
}
tomoyo_allocated_memory_for_savename += PATH_MAX;
list_add(&fmb->list, &fmb_list);
fmb->ptr = cp;
fmb->len = PATH_MAX;
ready:
ptr = tomoyo_alloc_element(sizeof(*ptr));
if (!ptr)
goto out;
ptr->entry.name = fmb->ptr;
memmove(fmb->ptr, name, len);
atomic_add(allocated_len, &tomoyo_policy_memory_size);
ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
memmove((char *) ptr->entry.name, name, len);
atomic_set(&ptr->users, 1);
tomoyo_fill_path_info(&ptr->entry);
fmb->ptr += len;
fmb->len -= len;
list_add_tail(&ptr->list, head);
if (fmb->len == 0) {
list_del(&fmb->list);
kfree(fmb);
}
out:
mutex_unlock(&lock);
mutex_unlock(&tomoyo_name_list_lock);
return ptr ? &ptr->entry : NULL;
}
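
tomoyo_get_name() now hands out reference-counted strings: a fresh entry starts with users == 1 and a cache hit takes atomic_inc(&ptr->users), so each successful call must eventually be balanced by a put that drops the counter; the garbage collector only queues entries whose counter has reached zero (the atomic_read(&ptr->users) check in tomoyo_collect_entry()). A hedged usage sketch follows, assuming the tomoyo_put_name() counterpart added elsewhere in this series.

/* Illustrative pairing; tomoyo_put_name() is assumed from this series. */
static bool example_path_is_dir(const char *pathname)
{
	const struct tomoyo_path_info *name = tomoyo_get_name(pathname);
	bool is_dir;

	if (!name)
		return false;
	is_dir = name->is_dir;	/* shared, immutable string data */
	tomoyo_put_name(name);	/* let the GC reclaim it once unused */
	return is_dir;
}
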
......@@ -400,45 +319,14 @@ void __init tomoyo_realpath_init(void)
for (i = 0; i < TOMOYO_MAX_HASH; i++)
INIT_LIST_HEAD(&tomoyo_name_list[i]);
INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
down_read(&tomoyo_domain_list_lock);
tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME);
/*
* tomoyo_read_lock() is not needed because this function is
* called before the first "delete" request.
*/
list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
panic("Can't register tomoyo_kernel_domain");
up_read(&tomoyo_domain_list_lock);
}
/* Memory allocated for temporary purpose. */
static atomic_t tomoyo_dynamic_memory_size;
/**
* tomoyo_alloc - Allocate memory for temporary purpose.
*
* @size: Size in bytes.
*
* Returns pointer to allocated memory on success, NULL otherwise.
*/
void *tomoyo_alloc(const size_t size)
{
void *p = kzalloc(size, GFP_KERNEL);
if (p)
atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
return p;
}
/**
* tomoyo_free - Release memory allocated by tomoyo_alloc().
*
* @p: Pointer returned by tomoyo_alloc(). May be NULL.
*
* Returns nothing.
*/
void tomoyo_free(const void *p)
{
if (p) {
atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
kfree(p);
}
}
/**
......@@ -451,32 +339,19 @@ void tomoyo_free(const void *p)
int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
{
if (!head->read_eof) {
const unsigned int shared
= tomoyo_allocated_memory_for_savename;
const unsigned int private
= tomoyo_allocated_memory_for_elements;
const unsigned int dynamic
= atomic_read(&tomoyo_dynamic_memory_size);
const unsigned int policy
= atomic_read(&tomoyo_policy_memory_size);
char buffer[64];
memset(buffer, 0, sizeof(buffer));
if (tomoyo_quota_for_savename)
snprintf(buffer, sizeof(buffer) - 1,
" (Quota: %10u)",
tomoyo_quota_for_savename);
else
buffer[0] = '\0';
tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer);
if (tomoyo_quota_for_elements)
if (tomoyo_quota_for_policy)
snprintf(buffer, sizeof(buffer) - 1,
" (Quota: %10u)",
tomoyo_quota_for_elements);
tomoyo_quota_for_policy);
else
buffer[0] = '\0';
tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer);
tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic);
tomoyo_io_printf(head, "Total: %10u\n",
shared + private + dynamic);
tomoyo_io_printf(head, "Policy: %10u%s\n", policy, buffer);
tomoyo_io_printf(head, "Total: %10u\n", policy);
head->read_eof = true;
}
return 0;
......@@ -494,9 +369,7 @@ int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head)
char *data = head->write_buf;
unsigned int size;
if (sscanf(data, "Shared: %u", &size) == 1)
tomoyo_quota_for_savename = size;
else if (sscanf(data, "Private: %u", &size) == 1)
tomoyo_quota_for_elements = size;
if (sscanf(data, "Policy: %u", &size) == 1)
tomoyo_quota_for_policy = size;
return 0;
}
/*
* security/tomoyo/realpath.h
*
* Get the canonicalized absolute pathnames. The basis for TOMOYO.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0 2009/04/01
*
*/
#ifndef _SECURITY_TOMOYO_REALPATH_H
#define _SECURITY_TOMOYO_REALPATH_H
struct path;
struct tomoyo_path_info;
struct tomoyo_io_buffer;
/* Convert binary string to ascii string. */
int tomoyo_encode(char *buffer, int buflen, const char *str);
/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
int tomoyo_realpath_from_path2(struct path *path, char *newname,
int newname_len);
/*
* Returns realpath(3) of the given pathname but ignores chroot'ed root.
* These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
* if these functions didn't return NULL.
*/
char *tomoyo_realpath(const char *pathname);
/*
 * Same as tomoyo_realpath() except that it doesn't follow the final symlink.
*/
char *tomoyo_realpath_nofollow(const char *pathname);
/* Same as tomoyo_realpath() except that the pathname is already resolved. */
char *tomoyo_realpath_from_path(struct path *path);
/*
* Allocate memory for ACL entry.
* The RAM is chunked, so NEVER try to kfree() the returned pointer.
*/
void *tomoyo_alloc_element(const unsigned int size);
/*
* Keep the given name on the RAM.
* The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
const struct tomoyo_path_info *tomoyo_save_name(const char *name);
/* Allocate memory for temporary use (e.g. permission checks). */
void *tomoyo_alloc(const size_t size);
/* Free memory allocated by tomoyo_alloc(). */
void tomoyo_free(const void *p);
/* Check for memory usage. */
int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
/* Set memory quota. */
int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
/* Initialize realpath related code. */
void __init tomoyo_realpath_init(void);
#endif /* !defined(_SECURITY_TOMOYO_REALPATH_H) */
......@@ -11,8 +11,6 @@
#include <linux/security.h>
#include "common.h"
#include "tomoyo.h"
#include "realpath.h"
static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
{
......@@ -23,21 +21,23 @@ static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
/*
* Since "struct tomoyo_domain_info *" is a sharable pointer,
* we don't need to duplicate.
*/
new->security = old->security;
struct tomoyo_domain_info *domain = old->security;
new->security = domain;
if (domain)
atomic_inc(&domain->users);
return 0;
}
static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
{
/*
* Since "struct tomoyo_domain_info *" is a sharable pointer,
* we don't need to duplicate.
*/
new->security = old->security;
tomoyo_cred_prepare(new, old, 0);
}
static void tomoyo_cred_free(struct cred *cred)
{
struct tomoyo_domain_info *domain = cred->security;
if (domain)
atomic_dec(&domain->users);
}
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
......@@ -60,6 +60,14 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
*/
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
/*
* Release reference to "struct tomoyo_domain_info" stored inside
* "bprm->cred->security". New reference to "struct tomoyo_domain_info"
* stored inside "bprm->cred->security" will be acquired later inside
* tomoyo_find_next_domain().
*/
atomic_dec(&((struct tomoyo_domain_info *)
bprm->cred->security)->users);
/*
	 * Tell tomoyo_bprm_check_security() that this is the first call for
	 * this execve operation.
......@@ -76,8 +84,12 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
if (!domain)
return tomoyo_find_next_domain(bprm);
if (!domain) {
const int idx = tomoyo_read_lock();
const int err = tomoyo_find_next_domain(bprm);
tomoyo_read_unlock(idx);
return err;
}
/*
* Read permission is checked against interpreters using next domain.
*/
......@@ -87,67 +99,56 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
static int tomoyo_path_truncate(struct path *path, loff_t length,
unsigned int time_attrs)
{
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_TRUNCATE_ACL,
path);
return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path);
}
static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_UNLINK_ACL,
&path);
return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path);
}
static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
int mode)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_MKDIR_ACL,
&path);
return tomoyo_path_perm(TOMOYO_TYPE_MKDIR, &path);
}
static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_RMDIR_ACL,
&path);
return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path);
}
static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { parent->mnt, dentry };
return tomoyo_check_1path_perm(tomoyo_domain(),
TOMOYO_TYPE_SYMLINK_ACL,
&path);
return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path);
}
static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
int mode, unsigned int dev)
{
struct path path = { parent->mnt, dentry };
int type = TOMOYO_TYPE_CREATE_ACL;
int type = TOMOYO_TYPE_CREATE;
switch (mode & S_IFMT) {
case S_IFCHR:
type = TOMOYO_TYPE_MKCHAR_ACL;
type = TOMOYO_TYPE_MKCHAR;
break;
case S_IFBLK:
type = TOMOYO_TYPE_MKBLOCK_ACL;
type = TOMOYO_TYPE_MKBLOCK;
break;
case S_IFIFO:
type = TOMOYO_TYPE_MKFIFO_ACL;
type = TOMOYO_TYPE_MKFIFO;
break;
case S_IFSOCK:
type = TOMOYO_TYPE_MKSOCK_ACL;
type = TOMOYO_TYPE_MKSOCK;
break;
}
return tomoyo_check_1path_perm(tomoyo_domain(),
type, &path);
return tomoyo_path_perm(type, &path);
}
static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
......@@ -155,9 +156,7 @@ static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
{
struct path path1 = { new_dir->mnt, old_dentry };
struct path path2 = { new_dir->mnt, new_dentry };
return tomoyo_check_2path_perm(tomoyo_domain(),
TOMOYO_TYPE_LINK_ACL,
&path1, &path2);
return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
}
static int tomoyo_path_rename(struct path *old_parent,
......@@ -167,16 +166,14 @@ static int tomoyo_path_rename(struct path *old_parent,
{
struct path path1 = { old_parent->mnt, old_dentry };
struct path path2 = { new_parent->mnt, new_dentry };
return tomoyo_check_2path_perm(tomoyo_domain(),
TOMOYO_TYPE_RENAME_ACL,
&path1, &path2);
return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
}
static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))
return tomoyo_check_rewrite_permission(tomoyo_domain(), file);
return tomoyo_check_rewrite_permission(file);
return 0;
}
......@@ -189,6 +186,51 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
}
static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return tomoyo_path_perm(TOMOYO_TYPE_IOCTL, &file->f_path);
}
static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
mode_t mode)
{
struct path path = { mnt, dentry };
return tomoyo_path_perm(TOMOYO_TYPE_CHMOD, &path);
}
static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
{
int error = 0;
if (uid != (uid_t) -1)
error = tomoyo_path_perm(TOMOYO_TYPE_CHOWN, path);
if (!error && gid != (gid_t) -1)
error = tomoyo_path_perm(TOMOYO_TYPE_CHGRP, path);
return error;
}
static int tomoyo_path_chroot(struct path *path)
{
return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path);
}
static int tomoyo_sb_mount(char *dev_name, struct path *path,
char *type, unsigned long flags, void *data)
{
return tomoyo_path_perm(TOMOYO_TYPE_MOUNT, path);
}
static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
{
struct path path = { mnt, mnt->mnt_root };
return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path);
}
static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
{
return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
}
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
......@@ -198,6 +240,7 @@ static struct security_operations tomoyo_security_ops = {
.cred_alloc_blank = tomoyo_cred_alloc_blank,
.cred_prepare = tomoyo_cred_prepare,
.cred_transfer = tomoyo_cred_transfer,
.cred_free = tomoyo_cred_free,
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
.file_fcntl = tomoyo_file_fcntl,
......@@ -210,8 +253,18 @@ static struct security_operations tomoyo_security_ops = {
.path_mknod = tomoyo_path_mknod,
.path_link = tomoyo_path_link,
.path_rename = tomoyo_path_rename,
.file_ioctl = tomoyo_file_ioctl,
.path_chmod = tomoyo_path_chmod,
.path_chown = tomoyo_path_chown,
.path_chroot = tomoyo_path_chroot,
.sb_mount = tomoyo_sb_mount,
.sb_umount = tomoyo_sb_umount,
.sb_pivotroot = tomoyo_sb_pivotroot,
};
/* Lock for GC. */
struct srcu_struct tomoyo_ss;
static int __init tomoyo_init(void)
{
struct cred *cred = (struct cred *) current_cred();
......@@ -219,7 +272,8 @@ static int __init tomoyo_init(void)
if (!security_module_enable(&tomoyo_security_ops))
return 0;
/* register ourselves with the security framework */
if (register_security(&tomoyo_security_ops))
if (register_security(&tomoyo_security_ops) ||
init_srcu_struct(&tomoyo_ss))
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
......
/*
* security/tomoyo/tomoyo.h
*
* Implementation of the Domain-Based Mandatory Access Control.
*
* Copyright (C) 2005-2009 NTT DATA CORPORATION
*
* Version: 2.2.0 2009/04/01
*
*/
#ifndef _SECURITY_TOMOYO_TOMOYO_H
#define _SECURITY_TOMOYO_TOMOYO_H
struct tomoyo_path_info;
struct path;
struct inode;
struct linux_binprm;
struct pt_regs;
int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename);
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag);
int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
const u8 operation, struct path *path);
int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
const u8 operation, struct path *path1,
struct path *path2);
int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
struct file *filp);
int tomoyo_find_next_domain(struct linux_binprm *bprm);
/* Index numbers for Access Controls. */
#define TOMOYO_TYPE_SINGLE_PATH_ACL 0
#define TOMOYO_TYPE_DOUBLE_PATH_ACL 1
/* Index numbers for File Controls. */
/*
* TYPE_READ_WRITE_ACL is special. TYPE_READ_WRITE_ACL is automatically set
* if both TYPE_READ_ACL and TYPE_WRITE_ACL are set. Both TYPE_READ_ACL and
* TYPE_WRITE_ACL are automatically set if TYPE_READ_WRITE_ACL is set.
* TYPE_READ_WRITE_ACL is automatically cleared if either TYPE_READ_ACL or
* TYPE_WRITE_ACL is cleared. Both TYPE_READ_ACL and TYPE_WRITE_ACL are
* automatically cleared if TYPE_READ_WRITE_ACL is cleared.
*/
#define TOMOYO_TYPE_READ_WRITE_ACL 0
#define TOMOYO_TYPE_EXECUTE_ACL 1
#define TOMOYO_TYPE_READ_ACL 2
#define TOMOYO_TYPE_WRITE_ACL 3
#define TOMOYO_TYPE_CREATE_ACL 4
#define TOMOYO_TYPE_UNLINK_ACL 5
#define TOMOYO_TYPE_MKDIR_ACL 6
#define TOMOYO_TYPE_RMDIR_ACL 7
#define TOMOYO_TYPE_MKFIFO_ACL 8
#define TOMOYO_TYPE_MKSOCK_ACL 9
#define TOMOYO_TYPE_MKBLOCK_ACL 10
#define TOMOYO_TYPE_MKCHAR_ACL 11
#define TOMOYO_TYPE_TRUNCATE_ACL 12
#define TOMOYO_TYPE_SYMLINK_ACL 13
#define TOMOYO_TYPE_REWRITE_ACL 14
#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
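
As a reading aid only (not part of this patch), the coupled READ/WRITE semantics described in the comment above amount to a derived-bit update on a permission bitmap whose bit positions are these index numbers; the helper name below is illustrative, and the reverse direction (a READ_WRITE request setting both READ and WRITE) is handled analogously at update time.

/* Illustrative: keep the READ_WRITE bit in sync after READ/WRITE changes. */
static void example_sync_read_write(u16 *perm)
{
	const u16 rw = (1 << TOMOYO_TYPE_READ_ACL) |
		       (1 << TOMOYO_TYPE_WRITE_ACL);

	if ((*perm & rw) == rw)
		*perm |= 1 << TOMOYO_TYPE_READ_WRITE_ACL;
	else
		*perm &= ~(1 << TOMOYO_TYPE_READ_WRITE_ACL);
}
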
#define TOMOYO_TYPE_LINK_ACL 0
#define TOMOYO_TYPE_RENAME_ACL 1
#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
#define TOMOYO_DOMAINPOLICY 0
#define TOMOYO_EXCEPTIONPOLICY 1
#define TOMOYO_DOMAIN_STATUS 2
#define TOMOYO_PROCESS_STATUS 3
#define TOMOYO_MEMINFO 4
#define TOMOYO_SELFDOMAIN 5
#define TOMOYO_VERSION 6
#define TOMOYO_PROFILE 7
#define TOMOYO_MANAGER 8
extern struct tomoyo_domain_info tomoyo_kernel_domain;
static inline struct tomoyo_domain_info *tomoyo_domain(void)
{
return current_cred()->security;
}
static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
*task)
{
return task_cred_xxx(task, security);
}
#endif /* !defined(_SECURITY_TOMOYO_TOMOYO_H) */