Commit 62fde541 authored by Tejun Heo

percpu: include/asm-generic/percpu.h should contain only arch-overridable parts

The roles of the various percpu header files have become unclear.
There are four header files involved.

 include/linux/percpu-defs.h
 include/linux/percpu.h
 include/asm-generic/percpu.h
 arch/*/include/asm/percpu.h

The original intention for include/asm-generic/percpu.h was to provide
generic definitions for the arch-overridable parts; however, it now also
hosts various definitions that archs cannot override.
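
For illustration only (not part of this patch), the override pattern that
asm-generic/percpu.h is meant to serve looks roughly like this; the
arch-side helper name below is made up:

	/* arch/<arch>/include/asm/percpu.h: arch supplies its own offset */
	#define __my_cpu_offset	arch_read_percpu_base()	/* hypothetical helper */

	/* include/asm-generic/percpu.h: generic fallback, used only when the
	 * arch did not define the macro itself */
	#ifndef __my_cpu_offset
	#define __my_cpu_offset	per_cpu_offset(raw_smp_processor_id())
	#endif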

Also, include/linux/percpu-defs.h was initially added to contain the
section and percpu variable definition macros so that arch header files
could use them without introducing a cyclic inclusion dependency through
include/linux/percpu.h; however, arch headers sometimes need to access
percpu variables too, which is one of the reasons some accessors were
implemented in include/asm-generic/percpu.h.

Let's clear up the situation by making include/asm-generic/percpu.h
contain only arch-overridable parts and moving accessors and
operations into include/linux/percpu-defs.h.  Note that this patch only
moves things from include/asm-generic/percpu.h.
include/linux/percpu.h will be taken care of by later patches.

This patch moves the following:

* SHIFT_PERCPU_PTR() / VERIFY_PERCPU_PTR()
* per_cpu()
* raw_cpu_ptr()
* this_cpu_ptr()
* __get_cpu_var()
* __raw_get_cpu_var()
* __this_cpu_ptr()
* PER_CPU_[SHARED_]ALIGNED_SECTION
* PER_CPU_FIRST_SECTION

This patch is pure reorganization.
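
For context, a minimal (hypothetical) user of the moved accessors looks
roughly like the following sketch; the per-CPU variable and functions are
invented for illustration and assume the callers handle preemption
appropriately:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, demo_hits);

	/* bump the running CPU's counter; this_cpu_ptr() resolves the
	 * per-CPU offset for the current CPU */
	static void demo_hit(void)
	{
		(*this_cpu_ptr(&demo_hits))++;
	}

	/* sum all CPUs' counters; per_cpu() indexes a specific CPU's copy */
	static unsigned long demo_total(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(demo_hits, cpu);
		return sum;
	}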
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Christoph Lameter <cl@linux.com>
Parent bbc344e1
include/asm-generic/percpu.h
@@ -35,24 +35,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define my_cpu_offset __my_cpu_offset
#endif
/*
 * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value. The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset) ({				\
	__verify_pcpu_ptr((__p));					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
})

/*
 * A percpu variable may point to a discarded regions. The following are
 * established ways to produce a usable pointer from the percpu variable
 * offset.
 */
#define per_cpu(var, cpu) \
	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
/*
 * Arch may define arch_raw_cpu_ptr() to provide more efficient address
 * translations for raw_cpu_ptr().
 */
@@ -61,34 +43,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
#endif
#define raw_cpu_ptr(ptr) arch_raw_cpu_ptr(ptr)
#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#else
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif
#else /* ! SMP */
#define VERIFY_PERCPU_PTR(__p) ({			\
	__verify_pcpu_ptr((__p));			\
	(typeof(*(__p)) __kernel __force *)(__p);	\
})
#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
#ifndef PER_CPU_BASE_SECTION
@@ -99,25 +57,6 @@ extern void setup_per_cpu_areas(void);
#endif
#endif
#ifdef CONFIG_SMP
#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
#ifndef PER_CPU_ATTRIBUTES
#define PER_CPU_ATTRIBUTES
#endif
@@ -126,7 +65,4 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_DEF_ATTRIBUTES
#endif
/* Keep until we have removed all uses of __this_cpu_ptr */
#define __this_cpu_ptr raw_cpu_ptr
#endif /* _ASM_GENERIC_PERCPU_H_ */
include/linux/percpu-defs.h
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files. Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations. It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */
#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H
#ifdef CONFIG_SMP
#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
@@ -164,4 +198,59 @@
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value. The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset) ({				\
	__verify_pcpu_ptr((__p));					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
})

/*
 * A percpu variable may point to a discarded regions. The following are
 * established ways to produce a usable pointer from the percpu variable
 * offset.
 */
#define per_cpu(var, cpu) \
	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
#define raw_cpu_ptr(ptr) arch_raw_cpu_ptr(ptr)
#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#else
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
#else /* CONFIG_SMP */
#define VERIFY_PERCPU_PTR(__p) ({			\
	__verify_pcpu_ptr((__p));			\
	(typeof(*(__p)) __kernel __force *)(__p);	\
})
#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* CONFIG_SMP */
/* keep until we have removed all uses of __this_cpu_ptr */
#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */