Commit 4c75f84f authored by Paul Mackerras, committed by Benjamin Herrenschmidt

powerpc: Add compiler memory barrier to mtmsr macro

On 32-bit non-Book E, local_irq_restore() turns into just mtmsr(),
which doesn't currently have a compiler memory barrier.  This means
that accesses to memory inside a local_irq_save/restore section,
or a spin_lock_irqsave/spin_unlock_irqrestore section on UP, can
be reordered by the compiler to occur outside that section.
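
To see the hazard concretely, here is a minimal sketch with hypothetical
names (not code from the kernel tree): without a "memory" clobber the
inline asm is not a compiler barrier, so GCC may legally sink a store
out of the critical section.

/* Hypothetical illustration: this asm has no "memory" clobber,
 * so it is not a compiler barrier. */
static inline void mtmsr_no_barrier(unsigned long v)
{
	asm volatile("mtmsr %0" : : "r" (v));
}

static unsigned long shared_counter;	/* hypothetical shared data */

static void critical_update(unsigned long saved_msr)
{
	shared_counter++;		/* intended to run with IRQs off */
	mtmsr_no_barrier(saved_msr);	/* restores MSR, re-enabling IRQs */
	/* GCC sees no dependency between the increment and the asm,
	 * so it may move the increment below the MSR restore, i.e.
	 * outside the interrupts-off section. */
}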

To fix this, add a compiler memory barrier to mtmsr for both 32-bit
and 64-bit.  Having a compiler memory barrier in mtmsr makes
sense because it will almost always be changing something about the
context in which memory accesses are done, so in general we don't want
memory accesses getting moved from one side of an mtmsr to the other.
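
For reference, the patched 32-bit form (taken from the diff below) is:

#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v) : "memory")

The "memory" clobber tells GCC that the instruction may read or write
arbitrary memory, so it must not cache memory values in registers
across the statement or move loads and stores to the other side of it.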

With the barrier in mtmsr(), some of the explicit barriers in
hw_irq.h are now redundant, so this removes them.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent e8d1673b
@@ -80,7 +80,7 @@ static inline void local_irq_disable(void)
 	__asm__ __volatile__("wrteei 0": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
@@ -92,7 +92,7 @@ static inline void local_irq_enable(void)
 	__asm__ __volatile__("wrteei 1": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr | MSR_EE);
 #endif
@@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #else
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
-	__asm__ __volatile__("": : :"memory");
 }
 
 #define local_save_flags(flags)	((flags) = mfmsr())
...
@@ -745,11 +745,11 @@
 				asm volatile("mfmsr %0" : "=r" (rval)); rval;})
 #ifdef CONFIG_PPC64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
-				     : : "r" (v))
+				     : : "r" (v) : "memory")
 #define mtmsrd(v)	__mtmsrd((v), 0)
 #define mtmsr(v)	mtmsrd(v)
 #else
-#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v) : "memory")
 #endif
 
 #define mfspr(rn)	({unsigned long rval; \
...