#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>

#include <asm/percpu.h>

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section)			\
	__attribute__((__section__(PER_CPU_BASE_SECTION section)))	\
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
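/*
 * For example (my_counter is a hypothetical name, not defined in the
 * kernel), a .c file would define and export a per-cpu variable with:
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *	EXPORT_PER_CPU_SYMBOL(my_counter);
 *
 * The token pasting above means the underlying symbol is really named
 * per_cpu__my_counter, which is why the EXPORT macros add the prefix.
 */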

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE	8192
#else
#define PERCPU_MODULE_RESERVE	0
#endif

#define PERCPU_ENOUGH_ROOM						\
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif	/* PERCPU_ENOUGH_ROOM */

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	extern int simple_identifier_##var(void);	\
	preempt_disable();				\
	&__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
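/*
 * A minimal usage sketch, reusing the hypothetical my_counter from
 * above: get_cpu_var() disables preemption so the task cannot be
 * migrated away from the CPU whose copy it is modifying, and
 * put_cpu_var() re-enables it.
 *
 *	get_cpu_var(my_counter)++;
 *	put_cpu_var(my_counter);
 */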

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk if arch is manually allocating and mapping
 * it for faster access (as a part of large page mapping for example).
 * Note that dynamic percpu allocator covers both static and dynamic
 * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
 *
 * On typical configuration with modules, the following values leave
 * about 8k of free space on the first chunk after boot on both x86_32
 * and 64 when module support is enabled.  When module support is
 * disabled, it's much tighter.
 */
#ifndef PERCPU_DYNAMIC_RESERVE
#  if BITS_PER_LONG > 32
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
#    endif
#  else
#    ifdef CONFIG_MODULES
#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
#    else
#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
#    endif
#  endif
#endif	/* PERCPU_DYNAMIC_RESERVE */
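/*
 * For example, with 4K pages (PAGE_SHIFT == 12) on a 64-bit kernel
 * with CONFIG_MODULES, PERCPU_DYNAMIC_RESERVE is 6 << 12 == 24K,
 * while PCPU_MIN_UNIT_SIZE above is 16UL << 12 == 64K.
 */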

extern void *pcpu_base_addr;

typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

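/*
 * pcpu_setup_first_chunk() initializes the first percpu chunk, which
 * covers the kernel's static percpu area.  get_page_fn must return
 * the page backing @pageno of @cpu's unit; populate_pte_fn, when
 * provided, populates page tables for the addresses the allocator
 * will use.  The authoritative kernel-doc lives with the definition
 * in mm/percpu.c.
 */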
extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
					size_t static_size, size_t unit_size,
					size_t free_size, void *base_addr,
					pcpu_populate_pte_fn_t populate_pte_fn);

/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
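/*
 * Per the note above, a non-atomic access to the local CPU's copy
 * pairs per_cpu_ptr() with get_cpu()/put_cpu().  A sketch only;
 * 'counters' is a hypothetical pointer from alloc_percpu() below:
 *
 *	int *counters = alloc_percpu(int);
 *	int cpu;
 *
 *	cpu = get_cpu();
 *	(*per_cpu_ptr(counters, cpu))++;
 *	put_cpu();
 */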

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

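/*
 * Without the dynamic allocator, a "percpu pointer" is a disguised
 * pointer to a struct percpu_data holding one slot per possible CPU.
 * __percpu_disguise() is its own inverse (a bitwise complement), so
 * the cookie handed out by the allocator faults if dereferenced
 * directly, while per_cpu_ptr() below recovers the real pointer.
 */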
struct percpu_data {
	void *ptrs[1];
};

#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

137 138 139 140 141 142
#define per_cpu_ptr(ptr, cpu)						\
({									\
        struct percpu_data *__p = __percpu_disguise(ptr);		\
        (__typeof__(ptr))__p->ptrs[(cpu)];				\
})

#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > __alignof__(unsigned long long));
	return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
	kfree(p);
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
						       __alignof__(type))
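/*
 * A minimal allocation sketch (struct my_stats and its 'packets'
 * field are hypothetical; error paths elided):
 *
 *	struct my_stats { unsigned long packets; };
 *
 *	struct my_stats *stats = alloc_percpu(struct my_stats);
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->packets;
 *	free_percpu(stats);
 */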

#endif /* __LINUX_PERCPU_H */