author     Jan Beulich <jbeulich@novell.com>      2007-05-02 19:27:10 +0200
committer  Andi Kleen <andi@basil.nowhere.org>    2007-05-02 19:27:10 +0200
commit     d01ad8dd56527be72947b4b9997bb2c05783c3ed (patch)
tree       4bb9ad792fae974101d0baec755202eebc8181aa /arch
parent     90a0a06aa81692028864c21f981905fda46b1208 (diff)
[PATCH] x86: Improve handling of kernel mappings in change_page_attr
Fix various broken corner cases in i386 and x86-64 change_page_attr.

AK: split off from tighten kernel image access rights

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andi Kleen <ak@suse.de>
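The functional change is on the x86-64 side: change_page_attr_addr() now recognises addresses that fall inside the kernel image mapping (__START_KERNEL_map up to __START_KERNEL_map + KERNEL_TEXT_SIZE), rewrites them to their direct-mapping alias with __va(__pa(address)), and records kernel_map so the primary update is only applied when the new protection is actually present. The sketch below restates just that canonicalisation step as a standalone user-space program; START_KERNEL_MAP, KERNEL_TEXT_SIZE, PAGE_OFFSET and the to_phys()/to_virt_direct() helpers are simplified stand-ins for the kernel macros of that era, not the real definitions.

#include <inttypes.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's __START_KERNEL_map, KERNEL_TEXT_SIZE
 * and __PAGE_OFFSET of that era; assumptions for the sketch, not the real macros. */
#define START_KERNEL_MAP 0xffffffff80000000ULL
#define KERNEL_TEXT_SIZE (40ULL << 20)
#define PAGE_OFFSET      0xffff810000000000ULL

/* Rough user-space equivalents of __pa()/__va() for the two kernel ranges. */
static uint64_t to_phys(uint64_t addr)
{
    return addr >= START_KERNEL_MAP ? addr - START_KERNEL_MAP
                                    : addr - PAGE_OFFSET;
}

static uint64_t to_virt_direct(uint64_t phys)
{
    return phys + PAGE_OFFSET;
}

/* Core of the new change_page_attr_addr() prologue: fold an address inside
 * the kernel image mapping onto its direct-mapping alias and remember that
 * we did so, mirroring the __va(__pa(address)) line added in the hunk below. */
static uint64_t canonicalize(uint64_t address, int *kernel_map)
{
    if (address >= START_KERNEL_MAP &&
        address < START_KERNEL_MAP + KERNEL_TEXT_SIZE) {
        *kernel_map = 1;
        return to_virt_direct(to_phys(address));
    }
    *kernel_map = 0;
    return address;
}

int main(void)
{
    int kernel_map;
    uint64_t text_addr = START_KERNEL_MAP + 0x100000; /* somewhere in the image */
    uint64_t alias = canonicalize(text_addr, &kernel_map);

    printf("%" PRIx64 " -> %" PRIx64 " (kernel_map=%d)\n",
           text_addr, alias, kernel_map);
    return 0;
}

With the alias in hand, the existing loop can keep treating the direct mapping as the primary mapping and reach the __START_KERNEL_map alias through the phys_base_pfn range check visible at the end of the hunk.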
Diffstat (limited to 'arch')
-rw-r--r--   arch/i386/mm/pageattr.c     4
-rw-r--r--   arch/x86_64/mm/pageattr.c  16
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 412ebbd8adb..ea6b6d4a0a2 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -142,7 +142,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
 		return -EINVAL;
 	kpte_page = virt_to_page(kpte);
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
-		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+		if (!pte_huge(*kpte)) {
 			set_pte_atomic(kpte, mk_pte(page, prot));
 		} else {
 			pgprot_t ref_prot;
@@ -158,7 +158,7 @@ __change_page_attr(struct page *page, pgprot_t prot)
 			kpte_page = split;
 		}
 		page_private(kpte_page)++;
-	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
+	} else if (!pte_huge(*kpte)) {
 		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
 		BUG_ON(page_private(kpte_page) == 0);
 		page_private(kpte_page)--;
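The two i386 hunks above are a readability cleanup rather than a behaviour change: on this architecture and in this era, pte_huge() is essentially a named test of the same _PAGE_PSE bit the old code checked by hand. A minimal sketch of that equivalence follows; the simplified pte_t, the pte_val() macro and the 0x080 bit value are stand-ins guessed from the asm-i386 headers, not copied from them.

#include <assert.h>

/* Simplified stand-ins for the i386 definitions in include/asm-i386/pgtable.h;
 * details (types, exact bit layout) may differ from the real headers. */
#define _PAGE_PSE 0x080                       /* large (4 MB / 2 MB) page bit */
typedef struct { unsigned long pte_low; } pte_t;
#define pte_val(x) ((x).pte_low)

/* pte_huge() reads as "is this a large-page mapping?" instead of a raw
 * bit test, which is all the patch changes on the i386 side. */
static inline int pte_huge(pte_t pte)
{
    return (pte_val(pte) & _PAGE_PSE) != 0;
}

int main(void)
{
    pte_t small = { .pte_low = 0x063 };             /* present, rw, no PSE */
    pte_t large = { .pte_low = 0x063 | _PAGE_PSE }; /* same, but PSE set */

    /* The rewritten conditions behave exactly like the old bit tests. */
    assert(!pte_huge(small) == ((pte_val(small) & _PAGE_PSE) == 0));
    assert(!pte_huge(large) == ((pte_val(large) & _PAGE_PSE) == 0));
    return 0;
}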
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 76ee90a5abe..bf4aa8dd425 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -179,16 +179,24 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 {
 	unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
-	int err = 0;
+	int err = 0, kernel_map = 0;
 	int i;
 
+	if (address >= __START_KERNEL_map
+	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
+		address = (unsigned long)__va(__pa(address));
+		kernel_map = 1;
+	}
+
 	down_write(&init_mm.mmap_sem);
 	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
 		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
-		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
-		if (err)
-			break;
+		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
+			if (err)
+				break;
+		}
 		/* Handle kernel mapping too which aliases part of the
 		 * lowmem */
 		if ((pfn >= phys_base_pfn) &&