#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

/*
 * Default first-chunk sizing: the static percpu section rounded up to
 * a cacheline, plus the module reserve.  Archs may override.
 */
#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.  (The dummy extern
 * declaration is never referenced; a non-identifier argument
 * simply fails to parse.)  Disables preemption until the matching
 * put_cpu_var().
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/* Pairs with get_cpu_var(); re-enables preemption. */
#define put_cpu_var(var) preempt_enable()

#ifdef CONFIG_SMP

#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  More
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

/* first-chunk base address and per-unit offsets, set up during boot */
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

/*
 * Layout of the first percpu chunk.  Built by pcpu_build_alloc_info()
 * and consumed by pcpu_setup_first_chunk() below.
 */
struct pcpu_alloc_info {
	size_t			static_size;	/* size of static percpu data */
	size_t			reserved_size;	/* reserved (module) space */
	size_t			dyn_size;	/* room for dynamic allocation */
	size_t			unit_size;	/* size of each per-cpu unit */
	size_t			atom_size;	/* allocation atom size */
	size_t			alloc_size;	/* size of each allocation */
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];	/* flexible array of groups */
};

/* first-chunk allocator types; pcpu_chosen_fc records the selection */
enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

92 93
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
94 95
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
96
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
97

98 99 100 101 102 103 104
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
105 106
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

T
Tejun Heo 已提交
107 108
extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);
109

110
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
111 112 113 114 115
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
116
#endif
117

118
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
T
Tejun Heo 已提交
119
extern int __init pcpu_page_first_chunk(size_t reserved_size,
120 121 122
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
123
#endif
124

125 126 127 128 129
/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
130 131
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

132 133
extern void *__alloc_reserved_percpu(size_t size, size_t align);

#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

/* legacy allocator: one pointer per possible cpu */
struct percpu_data {
	void *ptrs[1];
};

/* pointer disguising messes up the kmemleak objects tracking */
#ifndef CONFIG_DEBUG_KMEMLEAK
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
#else
#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
#endif

#define per_cpu_ptr(ptr, cpu)						\
({									\
	struct percpu_data *__p = __percpu_disguise(ptr);		\
	(__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
154

155 156
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
L
Linus Torvalds 已提交
157

158 159 160 161
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

/* UP: the only copy is the object itself; cpu is evaluated and discarded */
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

166
static inline void *__alloc_percpu(size_t size, size_t align)
167
{
168 169 170 171 172
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
173
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
I
Ingo Molnar 已提交
174
	return kzalloc(size, GFP_KERNEL);
175 176
}

/* UP counterpart of free_percpu(); the allocation came from kzalloc() */
static inline void free_percpu(void *p)
{
	kfree(p);
}

182 183
static inline void __init setup_per_cpu_areas(void) { }

/* large-page remapping exists only on SMP; lookups always miss here */
static inline void *pcpu_lpage_remapped(void *kaddr)
{
	return NULL;
}

#endif /* CONFIG_SMP */

/* typed front end: allocates one per-cpu instance of @type */
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
						       __alignof__(type))
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long.  percpu_read() evaluates to a lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(per_cpu_var(var)) __tmp_var__;				\
	__tmp_var__ = get_cpu_var(var);					\
	put_cpu_var(var);						\
	__tmp_var__;							\
  })
#endif

#define __percpu_generic_to_op(var, val, op)				\
do {									\
	get_cpu_var(var) op val;					\
	put_cpu_var(var);						\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif

#endif /* __LINUX_PERCPU_H */