提交 188a8140 编写于 作者: C Christoph Lameter 提交者: Linus Torvalds

percpu: add preemption checks to __this_cpu ops

We define a check function in order to avoid trouble with the include
files.  Then the higher level __this_cpu macros are modified to invoke
the preemption check.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 293b6a4c
...@@ -173,6 +173,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); ...@@ -173,6 +173,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
extern void __bad_size_call_parameter(void); extern void __bad_size_call_parameter(void);
#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable) \ #define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \ ({ typeof(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \ __verify_pcpu_ptr(&(variable)); \
...@@ -725,18 +731,24 @@ do { \ ...@@ -725,18 +731,24 @@ do { \
/* /*
* Generic percpu operations for context that are safe from preemption/interrupts. * Generic percpu operations for context that are safe from preemption/interrupts.
* Checks will be added here soon.
*/ */
#ifndef __this_cpu_read #ifndef __this_cpu_read
# define __this_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) # define __this_cpu_read(pcp) \
(__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
#endif #endif
#ifndef __this_cpu_write #ifndef __this_cpu_write
# define __this_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) # define __this_cpu_write(pcp, val) \
do { __this_cpu_preempt_check("write"); \
__pcpu_size_call(raw_cpu_write_, (pcp), (val)); \
} while (0)
#endif #endif
#ifndef __this_cpu_add #ifndef __this_cpu_add
# define __this_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) # define __this_cpu_add(pcp, val) \
do { __this_cpu_preempt_check("add"); \
__pcpu_size_call(raw_cpu_add_, (pcp), (val)); \
} while (0)
#endif #endif
#ifndef __this_cpu_sub #ifndef __this_cpu_sub
...@@ -752,16 +764,23 @@ do { \ ...@@ -752,16 +764,23 @@ do { \
#endif #endif
#ifndef __this_cpu_and #ifndef __this_cpu_and
# define __this_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) # define __this_cpu_and(pcp, val) \
do { __this_cpu_preempt_check("and"); \
__pcpu_size_call(raw_cpu_and_, (pcp), (val)); \
} while (0)
#endif #endif
#ifndef __this_cpu_or #ifndef __this_cpu_or
# define __this_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) # define __this_cpu_or(pcp, val) \
do { __this_cpu_preempt_check("or"); \
__pcpu_size_call(raw_cpu_or_, (pcp), (val)); \
} while (0)
#endif #endif
#ifndef __this_cpu_add_return #ifndef __this_cpu_add_return
# define __this_cpu_add_return(pcp, val) \ # define __this_cpu_add_return(pcp, val) \
__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
#endif #endif
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
...@@ -770,17 +789,17 @@ do { \ ...@@ -770,17 +789,17 @@ do { \
#ifndef __this_cpu_xchg #ifndef __this_cpu_xchg
# define __this_cpu_xchg(pcp, nval) \ # define __this_cpu_xchg(pcp, nval) \
__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
#endif #endif
#ifndef __this_cpu_cmpxchg #ifndef __this_cpu_cmpxchg
# define __this_cpu_cmpxchg(pcp, oval, nval) \ # define __this_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
#endif #endif
#ifndef __this_cpu_cmpxchg_double #ifndef __this_cpu_cmpxchg_double
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
#endif #endif
#endif /* __LINUX_PERCPU_H */ #endif /* __LINUX_PERCPU_H */
...@@ -7,7 +7,8 @@ ...@@ -7,7 +7,8 @@
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/sched.h> #include <linux/sched.h>
notrace unsigned int debug_smp_processor_id(void) notrace static unsigned int check_preemption_disabled(const char *what1,
const char *what2)
{ {
int this_cpu = raw_smp_processor_id(); int this_cpu = raw_smp_processor_id();
...@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void) ...@@ -38,9 +39,9 @@ notrace unsigned int debug_smp_processor_id(void)
if (!printk_ratelimit()) if (!printk_ratelimit())
goto out_enable; goto out_enable;
printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
"code: %s/%d\n", what1, what2, preempt_count() - 1, current->comm, current->pid);
preempt_count() - 1, current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0)); print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack(); dump_stack();
...@@ -50,5 +51,14 @@ notrace unsigned int debug_smp_processor_id(void) ...@@ -50,5 +51,14 @@ notrace unsigned int debug_smp_processor_id(void)
return this_cpu; return this_cpu;
} }
/*
 * debug_smp_processor_id - smp_processor_id() with a preemption-safety check.
 *
 * Thin wrapper: delegates to check_preemption_disabled() (defined above in
 * this file) so the resulting "BUG: using smp_processor_id() in preemptible"
 * diagnostic names the API the caller actually used.  Returns the current
 * CPU number, as reported by check_preemption_disabled().
 */
notrace unsigned int debug_smp_processor_id(void)
{
	return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id); EXPORT_SYMBOL(debug_smp_processor_id);
/*
 * __this_cpu_preempt_check - preemption check invoked by the __this_cpu_*()
 * percpu macros.
 * @op: name of the percpu operation ("read", "write", "add", ...); it is
 *      appended to the "__this_cpu_" prefix in the diagnostic message.
 *
 * Per the header hunk above, this out-of-line version is only declared when
 * CONFIG_DEBUG_PREEMPT is set; otherwise an empty static inline stub is used.
 * Exported so the check works from modules as well.
 */
notrace void __this_cpu_preempt_check(const char *op)
{
	check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册