/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/types.h>

typedef struct kmem_cache kmem_cache_t __deprecated;

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

/* name, object size, alignment, flags, constructor, destructor */
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *, struct kmem_cache *, unsigned long),
			void (*)(void *, struct kmem_cache *, unsigned long));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL, NULL)

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
/* Non-NUMA build: there is a single node, so the node hint is ignored. */
static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

/*
 * Common kmalloc functions provided by all allocators
 */
void *__kmalloc(size_t, gfp_t);
void *__kzalloc(size_t, gfp_t);
P
Pekka Enberg 已提交
81
void * __must_check krealloc(const void *, size_t, gfp_t);
82
void kfree(const void *);
P
Pekka Enberg 已提交
83
size_t ksize(const void *);

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * Returns %NULL if @n * @size would overflow an unsigned long.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	size_t total;

	/* Reject requests whose total byte count would wrap around. */
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;
	total = n * size;
	return __kzalloc(total, flags);
}
/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
 * the appropriate general cache at compile time.
 */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#else
#include <linux/slab_def.h>
#endif /* !CONFIG_SLUB */
#else

/*
 * Fallback definitions for an allocator not wanting to provide
 * its own optimized kmalloc definitions (like SLOB).
 */

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_DMA - Request memory from the DMA-capable zone.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_HIGHMEM - Allocated memory may be from highmem.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 */
C
Christoph Lameter 已提交
162
static inline void *kmalloc(size_t size, gfp_t flags)
L
Linus Torvalds 已提交
163 164 165 166
{
	return __kmalloc(size, flags);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
C
Christoph Lameter 已提交
172
static inline void *kzalloc(size_t size, gfp_t flags)
173 174 175 176 177
{
	return __kzalloc(size, flags);
}
#endif

#ifndef CONFIG_NUMA
/*
 * Non-NUMA fallback: there is only one node, so the node hint is
 * ignored and the request is served by plain kmalloc().
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

/* Non-NUMA fallback for the internal allocator entry point (node ignored). */
static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}
#endif /* !CONFIG_NUMA */

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			__builtin_return_address(0))
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

/* Non-NUMA build: fall back to the node-less caller-tracking variant. */
#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SLAB_H */