author     Jeremy Fitzhardinge <jeremy@goop.org>      2008-01-30 13:32:58 +0100
committer  Ingo Molnar <mingo@elte.hu>                2008-01-30 13:32:58 +0100
commit     195466dc4b9b8a4cc89d37ea1211746f3afbc941
tree       38a4dc9e105d54cf285cdcbc141b424a2fc16f41
parent     e33287013585e96180c575288bf1db22bee47b52
x86: pgtable: unify pte accessors
Make the various pte accessors common to the 32-bit and 64-bit page table headers.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--   include/asm-x86/pgtable-2level.h |   1
-rw-r--r--   include/asm-x86/pgtable-3level.h |   1
-rw-r--r--   include/asm-x86/pgtable.h        | 110
-rw-r--r--   include/asm-x86/pgtable_32.h     | 101
-rw-r--r--   include/asm-x86/pgtable_64.h     |  43
5 files changed, 113 insertions, 143 deletions
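The unification follows one pattern throughout: the generic ptep_* helper moves into the shared pgtable.h, each page-table mode keeps only its native_* primitive, and the paravirt notification hook pte_update() compiles to nothing when CONFIG_PARAVIRT is off. A minimal sketch of that layering, condensed from the hunks below (illustrative only, not a drop-in header):

/*
 * Layering sketch, condensed from this patch's additions to pgtable.h.
 * Without CONFIG_PARAVIRT the notification hook is a no-op; a paravirt
 * backend can supply its own pte_update() instead.
 */
#ifndef CONFIG_PARAVIRT
#define pte_update(mm, addr, ptep)	do { } while (0)
#endif

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);	/* per-mode primitive */
	pte_update(mm, addr, ptep);			/* paravirt notification */
	return pte;
}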
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index f949bb08308..65cdc8f9e3b 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -33,7 +33,6 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)		do { set_pmd(xp, __pmd(0)); } while (0)
 
 static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 3da96f792dd..a123e687112 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -101,7 +101,6 @@ static inline void native_pmd_clear(pmd_t *pmd)
 #define set_pte_atomic(ptep, pte)	native_set_pte_atomic(ptep, pte)
 #define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
 #define set_pud(pudp, pud)		native_set_pud(pudp, pud)
-#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
 #define pmd_clear(pmd)			native_pmd_clear(pmd)
 #endif
 
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 6452286e71a..62fa856c249 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -116,6 +116,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __S111	PAGE_SHARED_EXEC
 
 #ifndef __ASSEMBLY__
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -169,7 +170,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte(val);
 }
 
-
 #endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
@@ -178,4 +178,112 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 # include "pgtable_64.h"
 #endif
 
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_PARAVIRT
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush.  The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+#endif
+
+/* local pte updates need not use xchg for locking */
+static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
+{
+	pte_t res = *ptep;
+
+	/* Pure native function needs no input for mm, addr */
+	native_pte_clear(NULL, 0, ptep);
+	return res;
+}
+
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
+({									\
+	int __changed = !pte_same(*(ptep), entry);			\
+	if (__changed && dirty) {					\
+		*ptep = entry;						\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
+		flush_tlb_page(vma, address);				\
+	}								\
+	__changed;							\
+})
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
+	int __ret = 0;							\
+	if (pte_young(*(ptep)))						\
+		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
+					   &(ptep)->pte);		\
+	if (__ret)							\
+		pte_update((vma)->vm_mm, addr, ptep);			\
+	__ret;								\
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep)			\
+({									\
+	int __young;							\
+	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
+	if (__young)							\
+		flush_tlb_page(vma, address);				\
+	__young;							\
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = native_ptep_get_and_clear(ptep);
+	pte_update(mm, addr, ptep);
+	return pte;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+{
+	pte_t pte;
+	if (full) {
+		/*
+		 * Full address destruction in progress; paravirt does not
+		 * care about updates and native needs no locking
+		 */
+		pte = native_local_ptep_get_and_clear(ptep);
+	} else {
+		pte = ptep_get_and_clear(mm, addr, ptep);
+	}
+	return pte;
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
+	pte_update(mm, addr, ptep);
+}
+
+#ifndef CONFIG_PARAVIRT
+#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
+#endif	/* !CONFIG_PARAVIRT */
+
+#include <asm-generic/pgtable.h>
+#endif	/* __ASSEMBLY__ */
+
 #endif	/* _ASM_X86_PGTABLE_H */
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 109dad5e16e..d4d238c1029 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -107,105 +107,6 @@ extern unsigned long pg0[];
 # include <asm/pgtable-2level.h>
 #endif
 
-#ifndef CONFIG_PARAVIRT
-/*
- * Rules for using pte_update - it must be called after any PTE update which
- * has not been done using the set_pte / clear_pte interfaces.  It is used by
- * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
- * updates should either be sets, clears, or set_pte_atomic for P->P
- * transitions, which means this hook should only be called for user PTEs.
- * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush.  The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
- */
-#define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#endif
-
-/* local pte updates need not use xchg for locking */
-static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
-{
-	pte_t res = *ptep;
-
-	/* Pure native function needs no input for mm, addr */
-	native_pte_clear(NULL, 0, ptep);
-	return res;
-}
-
-/*
- * We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
- */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
-({									\
-	int __changed = !pte_same(*(ptep), entry);			\
-	if (__changed && dirty) {					\
-		(ptep)->pte_low = (entry).pte_low;			\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
-		flush_tlb_page(vma, address);				\
-	}								\
-	__changed;							\
-})
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
-	int __ret = 0;							\
-	if (pte_young(*(ptep)))						\
-		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
-					   &(ptep)->pte_low);		\
-	if (__ret)							\
-		pte_update((vma)->vm_mm, addr, ptep);			\
-	__ret;								\
-})
-
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(vma, address, ptep)			\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
-	if (__young)							\
-		flush_tlb_page(vma, address);				\
-	__young;							\
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = native_ptep_get_and_clear(ptep);
-	pte_update(mm, addr, ptep);
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
-{
-	pte_t pte;
-	if (full) {
-		/*
-		 * Full address destruction in progress; paravirt does not
-		 * care about updates and native needs no locking
-		 */
-		pte = native_local_ptep_get_and_clear(ptep);
-	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
-	}
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-	pte_update(mm, addr, ptep);
-}
-
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
@@ -359,6 +260,4 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _I386_PGTABLE_H */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 77038d8e9bf..987f51f684a 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -101,18 +101,18 @@ static inline void pgd_clear (pgd_t * pgd)
 	set_pgd(pgd, __pgd(0));
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#define native_ptep_get_and_clear(xp)	__pte(xchg(&(xp)->pte, 0))
 
 struct mm_struct;
 
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t native_ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
 	if (full) {
 		pte = *ptep;
 		*ptep = __pte(0);
 	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
+		pte = native_ptep_get_and_clear(ptep);
 	}
 	return pte;
 }
@@ -158,26 +158,12 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define native_pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
@@ -243,22 +229,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #define update_mmu_cache(vma,address,pte) do { } while (0)
 
-/* We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time. */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty)	\
-({									\
-	int __changed = !pte_same(*(__ptep), __entry);			\
-	if (__changed && __dirty) {					\
-		set_pte(__ptep, __entry);				\
-		flush_tlb_page(__vma, __address);			\
-	}								\
-	__changed;							\
-})
-
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
 #define __swp_offset(x)			((x).val >> 8)
@@ -290,12 +260,7 @@ pte_t *lookup_address(unsigned long addr);
 #define kc_offset_to_vaddr(o) \
    (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
 
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _X86_64_PGTABLE_H */
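The comment carried over to the pte_update()/pte_update_defer() stubs in the new pgtable.h describes when a shadow-paging hypervisor needs to be told about PTE changes made behind the set_pte/pte_clear interfaces. As a purely hypothetical illustration of that contract (not part of this patch; shadow_pte_update() and hv_shadow_resync() are invented names, not a real kernel or Xen interface), a backend honouring it might look like:

/*
 * Hypothetical shadow-paging backend for the pte_update() hook.
 * hv_shadow_resync() stands in for whatever resynchronization call the
 * hypervisor actually provides.
 */
static inline void shadow_pte_update(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep)
{
	/*
	 * The PTE at (mm, addr) was modified without going through
	 * set_pte/pte_clear; ask the hypervisor to bring its shadow page
	 * table back in sync.  Per the comment above the stubs, the caller
	 * still owes a TLB flush under the same page table lock.
	 */
	hv_shadow_resync(mm, addr, ptep);
}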