author    | Pallipadi, Venkatesh <venkatesh.pallipadi@intel.com> | 2009-05-26 10:33:35 -0700
committer | H. Peter Anvin <hpa@zytor.com>                       | 2009-05-26 13:12:12 -0700
commit    | 2171787be2e71ff71159857bfeb21398b61eb615
tree      | 671bde892ae47316d9fbf2b5f105d6c365d07b60
parent    | 46176b4f6bac19454b7b5c35f68594b85850a600
x86: avoid back to back on_each_cpu in cpa_flush_array
Clean up cpa_flush_array() to avoid back-to-back on_each_cpu() calls.
The wbinvd decision is now computed up front and carried in the argument of
the single on_each_cpu() broadcast, rather than issued as a second broadcast
for large ranges.
[ Impact: optimizes fix 0af48f42df15b97080b450d24219dd95db7b929a ]
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
-rw-r--r-- | arch/x86/mm/pageattr.c | 17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0f9052bcec4..e17efed088c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-static void wbinvd_local(void *unused)
-{
-	wbinvd();
-}
-
 static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 			    int in_flags, struct page **pages)
 {
 	unsigned int i, level;
+	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_range, NULL, 1);
+	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-	if (!cache)
+	if (!cache || do_wbinvd)
 		return;
 
-	/* 4M threshold */
-	if (numpages >= 1024) {
-		if (boot_cpu_data.x86 >= 4)
-			on_each_cpu(wbinvd_local, NULL, 1);
-
-		return;
-	}
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that
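For readers less familiar with the kernel's SMP helpers, the sketch below illustrates the pattern the hunk applies: decide about the full cache flush up front and carry that decision in the argument of a single cross-CPU broadcast, instead of issuing a second back-to-back broadcast just to run wbinvd. This is a minimal user-space analogue, not kernel code; broadcast_to_all_cpus(), flush_all(), and cpa_flush_array_like() are hypothetical stand-ins for on_each_cpu(), __cpa_flush_all(), and cpa_flush_array().

/*
 * Minimal user-space sketch (not kernel code) of the optimization above:
 * one broadcast carries the wbinvd decision instead of two broadcasts.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for on_each_cpu(): run fn(arg) "on every CPU". */
static void broadcast_to_all_cpus(void (*fn)(void *), void *arg)
{
	fn(arg);	/* single-threaded stand-in for the IPI broadcast */
}

/* Stand-in for __cpa_flush_all(): flush TLBs, and caches too if asked. */
static void flush_all(void *arg)
{
	bool do_wbinvd = arg != NULL;

	printf("flush TLBs%s\n", do_wbinvd ? " + wbinvd" : "");
}

/* Mirrors the reworked cpa_flush_array() control flow. */
static void cpa_flush_array_like(int numpages, bool cache)
{
	bool do_wbinvd = cache && numpages >= 1024;	/* 4M threshold */

	/* One broadcast does both jobs. */
	broadcast_to_all_cpus(flush_all, (void *)(long)do_wbinvd);

	if (!cache || do_wbinvd)
		return;

	/* Small cached range: per-page clflush on one CPU suffices. */
	printf("clflush %d pages locally\n", numpages);
}

int main(void)
{
	cpa_flush_array_like(16, true);		/* small range: per-page flush */
	cpa_flush_array_like(2048, true);	/* large range: wbinvd path */
	return 0;
}

In the kernel the broadcast is a real IPI to every online CPU, which is why collapsing two on_each_cpu() calls into one is worth the small restructuring.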