author     Arjan van de Ven <arjan@linux.intel.com>  2008-02-04 16:48:05 +0100
committer  Ingo Molnar <mingo@elte.hu>  2008-02-04 16:48:05 +0100
commit     626c2c9d065da0cbd9997e112501487958fde690 (patch)
tree       fccad0409c38a4cd845c82f407e2e77bd8dbfc47 /arch/x86/mm
parent     cc0f21bbc12dc9f05b2e7f2469128f8717b2f4d3 (diff)
x86: use the pfn from the existing PTE when changing its attributes
When changing the attributes of a pte, we should use the pfn from the existing PTE rather than going through hoops calculating what we think it might have been; this is both fragile and totally unneeded. It also makes it more hairy to call any of these functions on non-direct maps, for no good reason whatsoever.

With this change, __change_page_attr() no longer takes a pfn as argument, which simplifies all the callers.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@tglx.de>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/pageattr.c | 31
1 file changed, 17 insertions(+), 14 deletions(-)
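To illustrate the idea outside the kernel, here is a minimal user-space sketch of what the patch does: read the pfn out of the PTE that is already installed and rewrite only its attribute bits. The types, masks and helper names below (pte_t, pte_pfn(), pfn_pte(), the bit layout) are simplified stand-ins chosen for this example, not the real x86 definitions or kernel API:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's pte/pgprot handling (illustration only). */
#define PAGE_SHIFT     12
#define PTE_PFN_MASK   0x000ffffffffff000ULL  /* bits 12..51 hold the pfn */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)

typedef uint64_t pte_t;
typedef uint64_t pgprot_t;

static uint64_t pte_pfn(pte_t pte)     { return (pte & PTE_PFN_MASK) >> PAGE_SHIFT; }
static pgprot_t pte_pgprot(pte_t pte)  { return pte & PTE_FLAGS_MASK; }
static pte_t pfn_pte(uint64_t pfn, pgprot_t prot)
{
        return (pfn << PAGE_SHIFT) | (prot & PTE_FLAGS_MASK);
}

/*
 * Change attributes the way the patch does: the pfn is taken from the
 * existing PTE, only the protection bits are rewritten.
 */
static pte_t change_attr(pte_t old_pte, pgprot_t mask_set, pgprot_t mask_clr)
{
        pgprot_t prot = pte_pgprot(old_pte);

        prot &= ~mask_clr;
        prot |= mask_set;

        return pfn_pte(pte_pfn(old_pte), prot);
}

int main(void)
{
        pte_t old_pte = pfn_pte(0x1234, 0x63);          /* present, rw, accessed, dirty */
        pte_t new_pte = change_attr(old_pte, 0, 0x2);   /* clear the RW bit */

        printf("pfn %#llx -> %#llx, flags %#llx\n",
               (unsigned long long)pte_pfn(old_pte),
               (unsigned long long)pte_pfn(new_pte),
               (unsigned long long)pte_pgprot(new_pte));
        return 0;
}

In the kernel itself the same logic runs on the live page tables via set_pte_atomic(), as the patch below shows.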
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bf5e33f6a32..6c55fbdbd7e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -277,17 +277,12 @@ out_unlock:
 }
 
 static int
-__change_page_attr(unsigned long address, unsigned long pfn,
-                   pgprot_t mask_set, pgprot_t mask_clr)
+__change_page_attr(unsigned long address, pgprot_t mask_set, pgprot_t mask_clr)
 {
         struct page *kpte_page;
         int level, err = 0;
         pte_t *kpte;
 
-#ifdef CONFIG_X86_32
-        BUG_ON(pfn > max_low_pfn);
-#endif
-
 repeat:
         kpte = lookup_address(address, &level);
         if (!kpte)
@@ -298,17 +293,25 @@ repeat:
         BUG_ON(PageCompound(kpte_page));
 
         if (level == PG_LEVEL_4K) {
-                pgprot_t new_prot = pte_pgprot(*kpte);
                 pte_t new_pte, old_pte = *kpte;
+                pgprot_t new_prot = pte_pgprot(old_pte);
+
+                if (!pte_val(old_pte)) {
+                        WARN_ON_ONCE(1);
+                        return -EINVAL;
+                }
 
                 pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
                 pgprot_val(new_prot) |= pgprot_val(mask_set);
 
                 new_prot = static_protections(new_prot, address);
 
-                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
-                BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
-
+                /*
+                 * We need to keep the pfn from the existing PTE,
+                 * after all we're only going to change its attributes,
+                 * not the memory it points to.
+                 */
+                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                 set_pte_atomic(kpte, new_pte);
         } else {
                 err = split_large_page(kpte, address);
@@ -337,11 +340,11 @@ static int
 change_page_attr_addr(unsigned long address, pgprot_t mask_set,
                       pgprot_t mask_clr)
 {
-        unsigned long phys_addr = __pa(address);
-        unsigned long pfn = phys_addr >> PAGE_SHIFT;
         int err;
 
 #ifdef CONFIG_X86_64
+        unsigned long phys_addr = __pa(address);
+
         /*
          * If we are inside the high mapped kernel range, then we
          * fixup the low mapping first. __va() returns the virtual
@@ -351,7 +354,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set,
         address = (unsigned long) __va(phys_addr);
 #endif
 
-        err = __change_page_attr(address, pfn, mask_set, mask_clr);
+        err = __change_page_attr(address, mask_set, mask_clr);
         if (err)
                 return err;
@@ -375,7 +378,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set,
                  * everything between 0 and KERNEL_TEXT_SIZE, so do
                  * not propagate lookup failures back to users:
                  */
-                __change_page_attr(address, pfn, mask_set, mask_clr);
+                __change_page_attr(address, mask_set, mask_clr);
         }
 #endif
         return err;