提交 e0fdb0e0 编写于 作者: R Rusty Russell 提交者: Tejun Heo

percpu: add __percpu for sparse.

We have to make __kernel "__attribute__((address_space(0)))" so we can
cast to it.

tj: * put_cpu_var() update.

    * Annotations added to dynamic allocator interface.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Tejun Heo <tj@kernel.org>
上级 f7b64fe8
...@@ -41,7 +41,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; ...@@ -41,7 +41,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* Only S390 provides its own means of moving the pointer. * Only S390 provides its own means of moving the pointer.
*/ */
#ifndef SHIFT_PERCPU_PTR #ifndef SHIFT_PERCPU_PTR
#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset)) /* Weird cast keeps both GCC and sparse happy. */
#define SHIFT_PERCPU_PTR(__p, __offset) \
RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
#endif #endif
/* /*
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#ifdef __CHECKER__ #ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1))) # define __user __attribute__((noderef, address_space(1)))
# define __kernel /* default address space */ # define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe)) # define __safe __attribute__((safe))
# define __force __attribute__((force)) # define __force __attribute__((force))
# define __nocast __attribute__((nocast)) # define __nocast __attribute__((nocast))
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
# define __acquire(x) __context__(x,1) # define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1) # define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
extern void __chk_user_ptr(const volatile void __user *); extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *); extern void __chk_io_ptr(const volatile void __iomem *);
#else #else
...@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); ...@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __acquire(x) (void)0 # define __acquire(x) (void)0
# define __release(x) (void)0 # define __release(x) (void)0
# define __cond_lock(x,c) (c) # define __cond_lock(x,c) (c)
# define __percpu
#endif #endif
#ifdef __KERNEL__ #ifdef __KERNEL__
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
* that section. * that section.
*/ */
#define __PCPU_ATTRS(sec) \ #define __PCPU_ATTRS(sec) \
__attribute__((section(PER_CPU_BASE_SECTION sec))) \ __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
PER_CPU_ATTRIBUTES PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \ #define __PCPU_DUMMY_ATTRS \
......
...@@ -30,8 +30,12 @@ ...@@ -30,8 +30,12 @@
preempt_disable(); \ preempt_disable(); \
&__get_cpu_var(var); })) &__get_cpu_var(var); }))
/*
* The weird & is necessary because sparse considers (void)(var) to be
* a direct dereference of percpu variable (var).
*/
#define put_cpu_var(var) do { \ #define put_cpu_var(var) do { \
(void)(var); \ (void)&(var); \
preempt_enable(); \ preempt_enable(); \
} while (0) } while (0)
...@@ -130,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -130,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
*/ */
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align); extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern void *__alloc_percpu(size_t size, size_t align); extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata); extern void free_percpu(void __percpu *__pdata);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void); extern void __init setup_per_cpu_areas(void);
...@@ -142,7 +146,7 @@ extern void __init setup_per_cpu_areas(void); ...@@ -142,7 +146,7 @@ extern void __init setup_per_cpu_areas(void);
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
static inline void *__alloc_percpu(size_t size, size_t align) static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{ {
/* /*
* Can't easily make larger alignment work with kmalloc. WARN * Can't easily make larger alignment work with kmalloc. WARN
...@@ -153,7 +157,7 @@ static inline void *__alloc_percpu(size_t size, size_t align) ...@@ -153,7 +157,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
return kzalloc(size, GFP_KERNEL); return kzalloc(size, GFP_KERNEL);
} }
static inline void free_percpu(void *p) static inline void free_percpu(void __percpu *p)
{ {
kfree(p); kfree(p);
} }
...@@ -168,7 +172,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr) ...@@ -168,7 +172,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#define alloc_percpu(type) \ #define alloc_percpu(type) \
(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type)) (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
/* /*
* Optional methods for optimized non-lvalue per-cpu variable access. * Optional methods for optimized non-lvalue per-cpu variable access.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册