author     Venki Pallipadi <venkatesh.pallipadi@intel.com>    2008-01-30 13:32:01 +0100
committer  Ingo Molnar <mingo@elte.hu>                        2008-01-30 13:32:01 +0100
commit     bde6f5f59c2b2b48a7a849c129d5b48838fe77ee (patch)
tree       4fa3befdfa227db56770a0dc85b8fc18be232f70
parent     7d409d6057c7244f8757ce15245f6df27271be0c (diff)
x86: voluntary leave_mm before entering ACPI C3
Avoid TLB flush IPIs during C3 states by voluntarily calling leave_mm() before entering C3.

The performance impact of the TLB flush on C3 entry should not be significant with respect to C3 wakeup latency. Also, CPUs tend to flush the TLB in hardware while in C3 anyway.

On an 8 logical CPU system running make -j2, the number of tlbflush IPIs goes down from 40 per second to ~0. The total number of interrupts during this workload was ~1200 per second, which makes this a ~3% saving in wakeups. There was no measurable performance or power impact, however.

[ akpm@linux-foundation.org: symbol export fixes. ]

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/smp_32.c           3
-rw-r--r--  arch/x86/kernel/smp_64.c           3
-rw-r--r--  drivers/acpi/processor_idle.c      2
-rw-r--r--  include/asm-ia64/acpi.h            2
-rw-r--r--  include/asm-x86/acpi.h             3
-rw-r--r--  include/asm-x86/mmu.h              8
-rw-r--r--  include/asm-x86/mmu_context_32.h   2
7 files changed, 19 insertions, 4 deletions
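
To make the mechanism concrete, here is a minimal, self-contained user-space C sketch of the idea (an editor's illustration, not the kernel code: struct mm, cpu_vm_mask, count_flush_ipis() and the simplified leave_mm() below are stand-ins for the real kernel symbols). A CPU that calls leave_mm() is cleared from the mm's CPU mask, so later TLB-shootdown rounds no longer send it an IPI:

/*
 * Illustrative user-space model, not kernel code: simplified stand-ins
 * for cpu_vm_mask, flush_tlb_others() and leave_mm().
 */
#include <stdio.h>

#define NR_CPUS 8

struct mm {
	unsigned int cpu_vm_mask;	/* bit N set => CPU N may hold TLB entries for this mm */
};

/* Stand-in for flush_tlb_others(): one IPI per remote CPU still in the mask. */
static int count_flush_ipis(const struct mm *mm, int sender)
{
	int cpu, ipis = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu != sender && (mm->cpu_vm_mask & (1u << cpu)))
			ipis++;
	return ipis;
}

/* Stand-in for leave_mm(): drop this CPU from the mm's TLB-interest mask.
 * The real function also reloads %cr3 with swapper_pg_dir. */
static void leave_mm(struct mm *mm, int cpu)
{
	mm->cpu_vm_mask &= ~(1u << cpu);
}

int main(void)
{
	struct mm mm = { .cpu_vm_mask = 0xffu };	/* all 8 CPUs lazily reference the mm */

	printf("flush IPIs before leave_mm(): %d\n", count_flush_ipis(&mm, 0));
	leave_mm(&mm, 3);				/* CPU 3 is about to enter C3 */
	printf("flush IPIs after CPU 3 left:  %d\n", count_flush_ipis(&mm, 0));
	return 0;
}

Compiled and run, the model prints 7 IPIs before and 6 after CPU 3 leaves the mask; the patch obtains the same kind of saving for CPUs idling in C3, where the hardware tends to flush the TLB anyway.
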
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 070816ac79e..dc0cde9d16f 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -256,13 +256,14 @@ static DEFINE_SPINLOCK(tlbstate_lock);
* We need to reload %cr3 since the page tables may be going
* away from under us..
*/
-void leave_mm(unsigned long cpu)
+void leave_mm(int cpu)
{
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
BUG();
cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
+EXPORT_SYMBOL_GPL(leave_mm);
/*
*
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 02a6533e890..2fd74b06db6 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -69,13 +69,14 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
* We cannot call mmdrop() because we are in interrupt context,
* instead update mm->cpu_vm_mask.
*/
-static inline void leave_mm(int cpu)
+void leave_mm(int cpu)
{
if (read_pda(mmu_state) == TLBSTATE_OK)
BUG();
cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
+EXPORT_SYMBOL_GPL(leave_mm);
/*
*
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2235f4e02d2..0721a8183c8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -534,6 +534,7 @@ static void acpi_processor_idle(void)
break;
case ACPI_STATE_C3:
+ acpi_unlazy_tlb(smp_processor_id());
/*
* Must be done before busmaster disable as we might
* need to access HPET !
@@ -1423,6 +1424,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return 0;
}
+ acpi_unlazy_tlb(smp_processor_id());
/*
* Must be done before busmaster disable as we might need to
* access HPET !
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
index 81bcd5e5178..cd1cc39b559 100644
--- a/include/asm-ia64/acpi.h
+++ b/include/asm-ia64/acpi.h
@@ -127,6 +127,8 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
+#define acpi_unlazy_tlb(x)
+
#endif /*__KERNEL__*/
#endif /*_ASM_ACPI_H*/
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index 2feb0c494be..98a9ca26653 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -27,6 +27,7 @@
#include <asm/numa.h>
#include <asm/processor.h>
+#include <asm/mmu.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -167,4 +168,6 @@ static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
}
#endif
+#define acpi_unlazy_tlb(x) leave_mm(x)
+
#endif /*__X86_ASM_ACPI_H*/
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index 3f922c8e1c8..efa962c3889 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -20,4 +20,12 @@ typedef struct {
void *vdso;
} mm_context_t;
+#ifdef CONFIG_SMP
+void leave_mm(int cpu);
+#else
+static inline void leave_mm(int cpu)
+{
+}
+#endif
+
#endif /* _ASM_X86_MMU_H */
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h
index 7eb0b0b1fb3..8198d1cca1f 100644
--- a/include/asm-x86/mmu_context_32.h
+++ b/include/asm-x86/mmu_context_32.h
@@ -32,8 +32,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#endif
}
-void leave_mm(unsigned long cpu);
-
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)