#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
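
/*
 * With CONFIG_SLUB_STATS enabled, these counters are exported per cache
 * through sysfs, one read-only file per item (named after the lower-case
 * form of the enum entry); e.g., for a hypothetical kmalloc-64 cache:
 *
 *	cat /sys/kernel/slab/kmalloc-64/alloc_fastpath
 *	cat /sys/kernel/slab/kmalloc-64/free_slowpath
 *
 * See the STAT_ATTR() definitions in mm/slub.c.
 */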

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
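
/*
 * freelist and tid together enable a lockless allocation/free fastpath.
 * Simplified sketch of how mm/slub.c uses them on allocation (the real
 * code lives in slab_alloc_node()):
 *
 *	tid = c->tid;
 *	object = c->freelist;
 *	if (this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				    object, tid,
 *				    get_freepointer_safe(s, object),
 *				    next_tid(tid)))
 *		return object;			(fastpath succeeded)
 *
 * If the cpu slab changed underneath us (interrupt, preemption, migration)
 * tid no longer matches, the cmpxchg fails and the slowpath is taken.
 */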

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
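
/*
 * mm/slub.c packs the allocation order into the high bits of x and the
 * object count into the low bits; roughly (see oo_make(), oo_order() and
 * oo_objects(), where OO_SHIFT/OO_MASK are defined):
 *
 *	x       = (order << OO_SHIFT) | objects;
 *	order   = x >> OO_SHIFT;
 *	objects = x & OO_MASK;
 *
 * Keeping both in a single word is what makes s->oo, s->min and s->max
 * readable and updatable atomically.
 */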

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
	int max_attr_size; /* for propagation, maximum size of a stored attr */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

/**
 * Calling this on allocated memory will check that the memory
 * is expected to be in use, and print warnings if not.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
	return true;
}
#endif

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
	}
	return __kmalloc(size, flags);
}
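
/*
 * For a compile-time constant size the dispatch above is resolved at build
 * time: an oversized request goes straight to the page allocator via
 * kmalloc_large(), a normal one to the matching fixed-size cache through
 * kmalloc_index(). A variable size, or a GFP_DMA request, ends up in
 * __kmalloc(), e.g.:
 *
 *	buf = kmalloc(64, GFP_KERNEL);	 constant: kmem_cache_alloc_trace()
 *					 on kmalloc_caches[kmalloc_index(64)]
 *	buf = kmalloc(len, GFP_KERNEL);	 variable: __kmalloc(len, GFP_KERNEL)
 */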

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
			       flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */