/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif
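
/*
 * Illustrative sketch, not part of the original file: with CONFIG_DEBUG_VM
 * enabled, the checks above reject a cache whose name is already in use,
 * so the duplicate create below fails and returns NULL.  The "demo" name
 * and the demo_sanity_check() helper are made up for this example, which
 * is compiled out via #if 0.
 */
#if 0
static void demo_sanity_check(void)
{
	struct kmem_cache *first, *dup;

	first = kmem_cache_create("demo", 64, 0, 0, NULL);	/* succeeds */
	dup = kmem_cache_create("demo", 64, 0, 0, NULL);	/* NULL: name in use */

	if (first)
		kmem_cache_destroy(first);
}
#endif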

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;
	char *n;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_locked;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_locked;

	n = kstrdup(name, GFP_KERNEL);
	if (!n) {
		err = -ENOMEM;
		goto out_locked;
	}

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		err = __kmem_cache_create(s, n, size, align, flags, ctor);
		if (!err)
			list_add(&s->list, &slab_caches);
		else {
			kfree(n);
			kmem_cache_free(kmem_cache, s);
		}

	} else {
		kfree(n);
		err = -ENOMEM;
	}

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	if (s->refcount == 1) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_WARNING "kmem_cache_create(%s) failed to"
				" create sysfs entry. Error %d\n",
					name, err);
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
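
/*
 * Illustrative sketch, not part of the original file: typical module usage
 * of the API above.  A cache is created for a hypothetical struct foo at
 * module load, one object is allocated and freed, and the cache is torn
 * down again at unload.  All "foo" names are made up; the block is
 * compiled out via #if 0.
 */
#if 0
struct foo {
	int bar;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	struct foo *f;

	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cache)
		return -ENOMEM;

	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
	if (!f) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	kmem_cache_free(foo_cache, f);
	return 0;
}

static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
#endif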

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	}
	mutex_unlock(&slab_mutex);
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
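
/*
 * Illustrative sketch, not part of the original file: a cache created with
 * SLAB_DESTROY_BY_RCU frees its backing pages only after an RCU grace
 * period, which is why kmem_cache_destroy() above calls rcu_barrier()
 * before releasing the cache itself.  The "rcu_obj" names are made up;
 * the block is compiled out via #if 0.
 */
#if 0
struct rcu_obj {
	int val;
};

static struct kmem_cache *rcu_obj_cache;

static int rcu_obj_setup(void)
{
	rcu_obj_cache = kmem_cache_create("rcu_obj",
					  sizeof(struct rcu_obj), 0,
					  SLAB_DESTROY_BY_RCU, NULL);
	return rcu_obj_cache ? 0 : -ENOMEM;
}

static void rcu_obj_teardown(void)
{
	/* waits for in-flight RCU frees via rcu_barrier() internally */
	kmem_cache_destroy(rcu_obj_cache);
}
#endif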

int slab_is_available(void)
{
	return slab_state >= UP;
}
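
/*
 * Illustrative sketch, not part of the original file: early boot code can
 * use slab_is_available() to choose between the slab allocator and the
 * boot-time allocator.  alloc_bootmem() (linux/bootmem.h) is assumed to
 * be usable at the call site; the helper name is made up and the block is
 * compiled out via #if 0.
 */
#if 0
static void *early_or_slab_alloc(unsigned long size)
{
	if (slab_is_available())
		return kzalloc(size, GFP_KERNEL);
	return alloc_bootmem(size);	/* boot-time allocation, also zeroed */
}
#endif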