ima_iint.c
/*
 * Copyright (C) 2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_iint.c
 * 	- implements the IMA hooks: ima_inode_alloc, ima_inode_free
 *	- cache integrity information associated with an inode
 *	  using a radix tree.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/radix-tree.h>
#include "ima.h"

#define ima_iint_delete ima_inode_free

RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);

static struct kmem_cache *iint_cache __read_mostly;

/* ima_iint_find_get - return the iint associated with an inode
 *
 * ima_iint_find_get gets a reference to the iint. Caller must
 * remember to put the iint reference.
 */
struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
{
	struct ima_iint_cache *iint;

	rcu_read_lock();
	iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
	if (!iint)
		goto out;
	kref_get(&iint->refcount);
out:
	rcu_read_unlock();
	return iint;
}
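
/*
 * Illustrative usage sketch (an assumption about typical callers, not a
 * definitive pattern): a reference handed out by ima_iint_find_get() must
 * eventually be dropped with kref_put(), using iint_free() as the release
 * function, e.g.:
 *
 *	iint = ima_iint_find_get(inode);
 *	if (iint) {
 *		mutex_lock(&iint->mutex);
 *		... examine or update the iint ...
 *		mutex_unlock(&iint->mutex);
 *		kref_put(&iint->refcount, iint_free);
 *	}
 */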

/* Allocate memory for the iint associated with the inode
 * from the iint_cache slab, initialize the iint, and
 * insert it into the radix tree.
 *
 * On success return a pointer to the iint; on failure return NULL.
 */
struct ima_iint_cache *ima_iint_insert(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;
	int rc = 0;

	if (!ima_initialized)
		return iint;
	iint = kmem_cache_alloc(iint_cache, GFP_KERNEL);
	if (!iint)
		return iint;

	rc = radix_tree_preload(GFP_KERNEL);
	if (rc < 0)
		goto out;

	spin_lock(&ima_iint_lock);
	rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
	spin_unlock(&ima_iint_lock);
out:
	if (rc < 0) {
		kmem_cache_free(iint_cache, iint);
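		/*
		 * -EEXIST means another task won the race and inserted an
		 * iint for this inode first; drop our allocation and hand
		 * back the existing entry instead.
		 */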
		if (rc == -EEXIST) {
			iint = radix_tree_lookup(&ima_iint_store,
						 (unsigned long)inode);
		} else
			iint = NULL;
	}
	radix_tree_preload_end();
	return iint;
}

/**
 * ima_inode_alloc - allocate an iint associated with an inode
 * @inode: pointer to the inode
 *
 * Return 0 on success, 1 on failure.
 */
int ima_inode_alloc(struct inode *inode)
{
	struct ima_iint_cache *iint;

	if (!ima_initialized)
		return 0;

	iint = ima_iint_insert(inode);
	if (!iint)
		return 1;
	return 0;
}

/* ima_iint_find_insert_get - get the iint associated with an inode
 *
 * Most iints are inserted at inode allocation time (ima_inode_alloc);
 * the exception is inodes allocated before IMA's late_initcall runs.
 * When the iint does not exist, allocate it, initialize and insert it,
 * and increment the iint refcount.
 *
 * (We can't initialize at security_initcall, before any inodes are
 * allocated; we have to wait at least until proc_init.)
 *
 * Return the iint.
 */
struct ima_iint_cache *ima_iint_find_insert_get(struct inode *inode)
{
	struct ima_iint_cache *iint = NULL;

	iint = ima_iint_find_get(inode);
	if (iint)
		return iint;

	iint = ima_iint_insert(inode);
	if (iint)
		kref_get(&iint->refcount);

	return iint;
}
EXPORT_SYMBOL_GPL(ima_iint_find_insert_get);
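
/*
 * Note on reference counting: a freshly inserted iint carries a single
 * reference owned by the radix-tree store, so the kref_get() above gives
 * the caller of ima_iint_find_insert_get() its own reference, which must
 * later be dropped with kref_put(&iint->refcount, iint_free).
 */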

/* iint_free - called when the iint refcount goes to zero */
void iint_free(struct kref *kref)
{
	struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
						   refcount);
	iint->version = 0;
	iint->flags = 0UL;
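	/*
	 * Nonzero counters at this point indicate unbalanced open/read/
	 * write accounting; report and reset them so the object goes back
	 * to the slab cache in its constructed state.
	 */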
	if (iint->readcount != 0) {
		printk(KERN_INFO "%s: readcount: %ld\n", __func__,
		       iint->readcount);
		iint->readcount = 0;
	}
	if (iint->writecount != 0) {
		printk(KERN_INFO "%s: writecount: %ld\n", __func__,
		       iint->writecount);
		iint->writecount = 0;
	}
	if (iint->opencount != 0) {
		printk(KERN_INFO "%s: opencount: %ld\n", __func__,
		       iint->opencount);
		iint->opencount = 0;
	}
	kref_set(&iint->refcount, 1);
	kmem_cache_free(iint_cache, iint);
}

void iint_rcu_free(struct rcu_head *rcu_head)
{
	struct ima_iint_cache *iint = container_of(rcu_head,
						   struct ima_iint_cache, rcu);
	kref_put(&iint->refcount, iint_free);
}

/**
 * ima_iint_delete - called on integrity_inode_free
 * @inode: pointer to the inode
 *
 * Free the integrity information (iint) associated with an inode.
 */
void ima_iint_delete(struct inode *inode)
{
	struct ima_iint_cache *iint;

	if (!ima_initialized)
		return;
	spin_lock(&ima_iint_lock);
	iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
	spin_unlock(&ima_iint_lock);
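	/*
	 * Drop the store's reference only after an RCU grace period, so
	 * that concurrent ima_iint_find_get() lookups running under
	 * rcu_read_lock() never see a freed iint.
	 */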
	if (iint)
		call_rcu(&iint->rcu, iint_rcu_free);
}

static void init_once(void *foo)
{
	struct ima_iint_cache *iint = foo;

	memset(iint, 0, sizeof *iint);
	iint->version = 0;
	iint->flags = 0UL;
	mutex_init(&iint->mutex);
	iint->readcount = 0;
	iint->writecount = 0;
	iint->opencount = 0;
	kref_set(&iint->refcount, 1);
}

void ima_iintcache_init(void)
{
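	/*
	 * SLAB_PANIC makes a failure to create the cache fatal at boot.
	 * init_once() runs when the slab allocator constructs each new
	 * object; iint_free() resets the fields before kmem_cache_free()
	 * so objects always return to the cache in this constructed state.
	 */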
	iint_cache =
	    kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
			      SLAB_PANIC, init_once);
}