author    Jeremy Fitzhardinge <jeremy@goop.org>	2009-02-12 10:02:56 -0800
committer Thomas Gleixner <tglx@linutronix.de>	2009-02-12 23:11:58 +0100
commit    d85cf93da66977dbc645352be1b2084a659d8a0b
tree      c8783022810c0ae48f5a5007ce9163c99e804d6f
parent    be03d9e8022030c16abf534e33e185bfc3d40eef
x86/paravirt: make arch_flush_lazy_mmu/cpu disable preemption
Impact: avoid access to percpu vars in preemptible context

They are intended to be used whenever there's the possibility that
there's some stale state which is going to be overwritten with a
queued update, or to force a state change when we may be in lazy mode.
Either way, we could end up calling it with preemption enabled, so
wrap the functions in their own little preempt-disable section so they
can be safely called in any context (though preemption should never be
enabled if we're actually in a lazy state).

(Move out of line to avoid #include dependencies.)

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/include/asm/paravirt.h')
 arch/x86/include/asm/paravirt.h | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ba3e2ff6aed..a660eceaa27 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1352,14 +1352,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_cpu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-}
-
+void arch_flush_lazy_cpu_mode(void);
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -1372,13 +1365,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_mmu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-}
+void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				unsigned long phys, pgprot_t flags)
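
The counterpart of this header change is not part of the diffstat above (it is limited to paravirt.h), but the commit message describes what the new out-of-line helpers do: keep the old check-and-flush logic, wrapped in a preempt-disable section. A minimal sketch of what those definitions, presumably in arch/x86/kernel/paravirt.c, would look like under that description:

/*
 * Sketch only -- the .c side of the patch is outside this diffstat.
 * The idea from the commit message: run the original flush logic with
 * preemption disabled, so the percpu lazy-mode state cannot be sampled
 * or rewritten from a preemptible context.
 */
void arch_flush_lazy_cpu_mode(void)
{
	preempt_disable();

	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}

	preempt_enable();
}

void arch_flush_lazy_mmu_mode(void)
{
	preempt_disable();

	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}

With the definitions moved out of line, the header only needs the two prototypes, which also avoids the #include dependencies mentioned in the commit message.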