author     Paul Mackerras <paulus@samba.org>                    2009-06-12 02:00:50 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2009-06-15 13:27:37 +1000
commit     4c75f84f2c781beb230031234ed961d28771a764 (patch)
tree       bae42cc387d0fdbc206729f2163b3eebefb9a85c
parent     e8d1673b9720ec72d85916c6b7d5d476abb2c861 (diff)
powerpc: Add compiler memory barrier to mtmsr macro
On 32-bit non-Book E, local_irq_restore() turns into just mtmsr(), which doesn't currently have a compiler memory barrier. This means that accesses to memory inside a local_irq_save/restore section, or a spin_lock_irqsave/spin_unlock_irqrestore section on UP, can be reordered by the compiler to occur outside that section.

To fix this, add a compiler memory barrier to mtmsr for both 32-bit and 64-bit. Having a compiler memory barrier in mtmsr makes sense because it will almost always be changing something about the context in which memory accesses are done, so in general we don't want memory accesses getting moved from one side of an mtmsr to the other.

With the barrier in mtmsr(), some of the explicit barriers in hw_irq.h are now redundant, so this removes them.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
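As a rough user-space sketch of the problem (not kernel code; the identifiers fake_mtmsr_no_clobber, fake_mtmsr_clobber and shared are made up for illustration), an inline asm without a "memory" clobber lets the compiler move loads and stores across it, while adding the clobber pins them in place, which is what the new mtmsr()/__mtmsrd() definitions rely on:

/* Illustrative sketch only; names are hypothetical, not from the kernel. */
static int shared;

static inline void fake_mtmsr_no_clobber(unsigned long v)
{
	/* No "memory" clobber: the compiler is free to move accesses to
	 * 'shared' to before or after this asm statement. */
	asm volatile("" : : "r" (v));
}

static inline void fake_mtmsr_clobber(unsigned long v)
{
	/* "memory" clobber: acts as a compiler barrier, like the patched
	 * mtmsr() below. */
	asm volatile("" : : "r" (v) : "memory");
}

void critical_update(unsigned long flags)
{
	fake_mtmsr_no_clobber(flags & ~0x8000UL);  /* pretend "disable interrupts" */
	shared++;                  /* without the clobber this store can leak outside */
	fake_mtmsr_clobber(flags); /* pretend "restore": the store cannot sink past this */
}

With the barrier folded into mtmsr() itself, callers such as local_irq_disable() no longer need their own explicit empty asm barriers, which is why the hw_irq.h hunks below delete them.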
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h  5
-rw-r--r--  arch/powerpc/include/asm/reg.h     4
2 files changed, 4 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 53512374e1c..b7f8f4a87cc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -80,7 +80,7 @@ static inline void local_irq_disable(void)
 	__asm__ __volatile__("wrteei 0": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
@@ -92,7 +92,7 @@ static inline void local_irq_enable(void)
 	__asm__ __volatile__("wrteei 1": : :"memory");
 #else
 	unsigned long msr;
-	__asm__ __volatile__("": : :"memory");
+
 	msr = mfmsr();
 	SET_MSR_EE(msr | MSR_EE);
 #endif
@@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #else
 	SET_MSR_EE(msr & ~MSR_EE);
 #endif
-	__asm__ __volatile__("": : :"memory");
 }
 
 #define local_save_flags(flags)	((flags) = mfmsr())
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fb359b0a693..a3c28e46947 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -745,11 +745,11 @@
 #define mfmsr()		({unsigned long rval; \
 			asm volatile("mfmsr %0" : "=r" (rval)); rval;})
 #ifdef CONFIG_PPC64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
-				     : : "r" (v))
+				     : : "r" (v) : "memory")
 #define mtmsrd(v)	__mtmsrd((v), 0)
 #define mtmsr(v)	mtmsrd(v)
 #else
-#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v)	asm volatile("mtmsr %0" : : "r" (v) : "memory")
 #endif
 #define mfspr(rn)	({unsigned long rval; \