author		Andi Kleen <andi@firstfloor.org>	2008-03-12 03:53:29 +0100
committer	Ingo Molnar <mingo@elte.hu>		2008-04-17 17:41:30 +0200
commit		c9caa02c529d5e113e40cbc77254558fcdfa4215
tree		cb3f9f5a5d26bf7df542a61bfb524e886d8c6510
parent		cc6150321903ca4c3bc9d53b0cdafb05d77d64d0
x86: add set_memory_4k to pageattr.c
Add a new function to force-split large pages into 4k pages.
This is needed for some follow-up optimizations.
I had to add a new field to cpa_data to pass down the information
that try_preserve_large_page() should not run.
There is no set_page_4k() for now because I didn't need it, and all
the specialized users I have in mind would be more comfortable with
plain addresses. I also didn't export it because it's unlikely that
external code needs it.
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: andreas.herrmann3@amd.com
Cc: mingo@elte.hu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/mm/pageattr.c		| 20 ++++++++++++++++----
-rw-r--r--	include/asm-x86/cacheflush.h	|  1 +
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 270cab2e603..7d9517abc9a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -31,6 +31,7 @@ struct cpa_data {
 	int		numpages;
 	int		flushtlb;
 	unsigned long	pfn;
+	unsigned	force_split : 1;
 };
 
 #ifdef CONFIG_X86_64
@@ -262,6 +263,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	int i, do_split = 1;
 	unsigned int level;
 
+	if (cpa->force_split)
+		return 1;
+
 	spin_lock_irqsave(&pgd_lock, flags);
 	/*
 	 * Check for races, another CPU might have split this page
@@ -696,7 +700,8 @@ static inline int cache_attr(pgprot_t attr)
 }
 
 static int change_page_attr_set_clr(unsigned long addr, int numpages,
-				    pgprot_t mask_set, pgprot_t mask_clr)
+				    pgprot_t mask_set, pgprot_t mask_clr,
+				    int force_split)
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
@@ -707,7 +712,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	 */
 	mask_set = canon_pgprot(mask_set);
 	mask_clr = canon_pgprot(mask_clr);
-	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
+	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
 		return 0;
 
 	/* Ensure we are PAGE_SIZE aligned */
@@ -724,6 +729,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	cpa.mask_set = mask_set;
 	cpa.mask_clr = mask_clr;
 	cpa.flushtlb = 0;
+	cpa.force_split = force_split;
 
 	/* No alias checking for _NX bit modifications */
 	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
@@ -762,13 +768,13 @@ out:
 static inline int change_page_attr_set(unsigned long addr, int numpages,
 				       pgprot_t mask)
 {
-	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
+	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
 }
 
 static inline int change_page_attr_clear(unsigned long addr, int numpages,
 					 pgprot_t mask)
 {
-	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
+	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
 }
 
 int _set_memory_uc(unsigned long addr, int numpages)
@@ -847,6 +853,12 @@ int set_memory_np(unsigned long addr, int numpages)
 	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
 }
 
+int set_memory_4k(unsigned long addr, int numpages)
+{
+	return change_page_attr_set_clr(addr, numpages, __pgprot(0),
+					__pgprot(0), 1);
+}
+
 int set_pages_uc(struct page *page, int numpages)
 {
 	unsigned long addr = (unsigned long)page_address(page);
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 7ab5b520b7b..cb1d6f8fd00 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -45,6 +45,7 @@ int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
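For context, a minimal sketch of how an in-kernel caller might use the new
interface. The buffer, its order, and the surrounding function are hypothetical
and not part of this patch; only set_memory_4k() itself comes from it:

/*
 * Hypothetical example caller (not part of this patch): force the
 * kernel direct mapping of a small buffer down to 4k PTEs.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

static int example_force_4k_mapping(void)
{
	unsigned long addr;
	int nr_pages = 16;	/* hypothetical: an order-4 buffer */
	int ret;

	addr = __get_free_pages(GFP_KERNEL, 4);
	if (!addr)
		return -ENOMEM;

	/*
	 * No protection bits are set or cleared; force_split = 1 only
	 * makes try_preserve_large_page() bail out, so any large page
	 * backing [addr, addr + nr_pages * PAGE_SIZE) is split into
	 * 4k pages.
	 */
	ret = set_memory_4k(addr, nr_pages);

	/* ... per-page attribute changes on the buffer would go here ... */

	free_pages(addr, 4);
	return ret;
}

Because set_memory_4k() passes empty set and clear masks, the call degenerates
to pure splitting: change_page_attr_set_clr() no longer returns early thanks to
the !force_split check, and the large mapping covering the range is broken up
without any attribute change.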