author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-10-09 21:31:56 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-10-09 21:31:56 +0100
commit     6a4690c22f5da1eb1c898b61b6a80da52fbd976f (patch)
tree       a03891a32abe0da191fb765fe669a597e07423c6 /arch/arm/mm
parent     90bb28b0644f7324f8bd1feb27b35146e6785ba2 (diff)
parent     8ec53663d2698076468b3e1edc4e1b418bd54de3 (diff)
Merge branch 'ptebits' into devel
Conflicts: arch/arm/Kconfig
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-v4mc.c    |   2
-rw-r--r--  arch/arm/mm/copypage-xscale.c  |   2
-rw-r--r--  arch/arm/mm/fault-armv.c       |  11
-rw-r--r--  arch/arm/mm/ioremap.c          |   3
-rw-r--r--  arch/arm/mm/mm.h               |   1
-rw-r--r--  arch/arm/mm/mmu.c              |  74
-rw-r--r--  arch/arm/mm/proc-arm1020.S     |  26
-rw-r--r--  arch/arm/mm/proc-arm1020e.S    |  26
-rw-r--r--  arch/arm/mm/proc-arm1022.S     |  26
-rw-r--r--  arch/arm/mm/proc-arm1026.S     |  26
-rw-r--r--  arch/arm/mm/proc-arm6_7.S      |  27
-rw-r--r--  arch/arm/mm/proc-arm720.S      |  25
-rw-r--r--  arch/arm/mm/proc-arm740.S      |   2
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S    |   2
-rw-r--r--  arch/arm/mm/proc-arm920.S      |  28
-rw-r--r--  arch/arm/mm/proc-arm922.S      |  26
-rw-r--r--  arch/arm/mm/proc-arm925.S      |  26
-rw-r--r--  arch/arm/mm/proc-arm926.S      |  26
-rw-r--r--  arch/arm/mm/proc-arm940.S      |   2
-rw-r--r--  arch/arm/mm/proc-arm946.S      |   2
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S    |   2
-rw-r--r--  arch/arm/mm/proc-feroceon.S    |  21
-rw-r--r--  arch/arm/mm/proc-macros.S      | 170
-rw-r--r--  arch/arm/mm/proc-sa110.S       |  21
-rw-r--r--  arch/arm/mm/proc-sa1100.S      |  21
-rw-r--r--  arch/arm/mm/proc-v6.S          |  42
-rw-r--r--  arch/arm/mm/proc-v7.S          |  29
-rw-r--r--  arch/arm/mm/proc-xsc3.S        |  56
-rw-r--r--  arch/arm/mm/proc-xscale.S      |  76
29 files changed, 333 insertions(+), 468 deletions(-)
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index ded0e96d069..8d33e254934 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -28,7 +28,7 @@
* specific hacks for copying pages efficiently.
*/
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_CACHEABLE)
+ L_PTE_MT_MINICACHE)
static DEFINE_SPINLOCK(minicache_lock);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 2e455f82a4d..bad49331bbf 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -30,7 +30,7 @@
#define COPYPAGE_MINICACHE 0xffff8000
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_CACHEABLE)
+ L_PTE_MT_MINICACHE)
static DEFINE_SPINLOCK(minicache_lock);
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index af6ed6ef9a8..81d0b8772de 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -23,7 +23,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
/*
* We take the easy way out of this problem - we make the
@@ -65,9 +65,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
* If this page isn't present, or is already setup to
* fault (ie, is old), we can safely ignore any issues.
*/
- if (ret && pte_val(entry) & shared_pte_mask) {
+ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
flush_cache_page(vma, address, pte_pfn(entry));
- pte_val(entry) &= ~shared_pte_mask;
+ pte_val(entry) &= ~L_PTE_MT_MASK;
+ pte_val(entry) |= shared_pte_mask;
set_pte_at(vma->vm_mm, address, pte, entry);
flush_tlb_page(vma, address);
}
@@ -199,7 +200,7 @@ void __init check_writebuffer_bugs(void)
unsigned long *p1, *p2;
pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
L_PTE_DIRTY|L_PTE_WRITE|
- L_PTE_BUFFERABLE);
+ L_PTE_MT_BUFFERABLE);
p1 = vmap(&page, 1, VM_IOREMAP, prot);
p2 = vmap(&page, 1, VM_IOREMAP, prot);
@@ -220,7 +221,7 @@ void __init check_writebuffer_bugs(void)
if (v) {
printk("failed, %s\n", reason);
- shared_pte_mask |= L_PTE_BUFFERABLE;
+ shared_pte_mask = L_PTE_MT_UNCACHED;
} else {
printk("ok\n");
}
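
For context, check_writebuffer_bugs() (partially shown in the hunk above) picks the boot-time value of shared_pte_mask: it maps the same physical page twice with the candidate protection and tests whether a write through one mapping is visible through the other. A rough sketch of that test, not the exact kernel code, using the p1/p2 mappings from the hunk:

	/* both p1 and p2 alias the same physical page */
	mb();
	*p1 = 1;		/* write through the first mapping      */
	mb();
	*p2 = 0;		/* overwrite through the second mapping */
	mb();
	v = (*p1 != 0);		/* still reads 1: the aliases are not   */
				/* coherent, so fall back to the        */
				/* L_PTE_MT_UNCACHED work-around above  */
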
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 8a41912ec7c..18373f73f2f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -56,8 +56,7 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
if (!pte_none(*pte))
goto bad;
- set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
- type->prot_pte_ext);
+ set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
phys_addr += PAGE_SIZE;
} while (pte++, addr += PAGE_SIZE, addr != end);
return 0;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 96590104ba0..5d9f53907b4 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -18,7 +18,6 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
unsigned int prot_pte;
- unsigned int prot_pte_ext;
unsigned int prot_l1;
unsigned int prot_sect;
unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e7af83e569d..8ba75406455 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -66,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
- .pte = 0,
+ .pte = L_PTE_MT_UNCACHED,
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
- .pte = PTE_BUFFERABLE,
+ .pte = L_PTE_MT_BUFFERABLE,
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
- .pte = PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITETHROUGH,
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEBACK,
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEALLOC,
}
};
@@ -184,29 +184,28 @@ void adjust_cr(unsigned long mask, unsigned long set)
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
- .prot_pte = PROT_PTE_DEVICE,
- .prot_pte_ext = PTE_EXT_TEX(2),
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cached */
- .prot_pte = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
.domain = DOMAIN_IO,
},
- [MT_DEVICE_IXP2000] = { /* IXP2400 requires XCB=101 for on-chip I/O */
- .prot_pte = PROT_PTE_DEVICE,
+ [MT_DEVICE_WC] = { /* ioremap_wc */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
- PMD_SECT_TEX(1),
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
@@ -251,7 +250,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot;
+ unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -269,6 +268,20 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
+#ifdef CONFIG_SMP
+ cachepolicy = CPOLICY_WRITEALLOC;
+#endif
+
+ /*
+ * On non-Xscale3 ARMv5-and-older systems, use CB=01
+ * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
+ * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+ * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+ */
+ if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
+ }
/*
* ARMv5 and lower, bit 4 must be set for page tables.
@@ -290,7 +303,15 @@ static void __init build_mem_type_table(void)
}
cp = &cache_policies[cachepolicy];
- kern_pgprot = user_pgprot = cp->pte;
+ vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+ /*
+ * Only use write-through for non-SMP systems
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
/*
* Enable CPU-specific coherency if supported.
@@ -318,7 +339,6 @@ static void __init build_mem_type_table(void)
/*
* Mark the device area as "shared device"
*/
- mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
#ifdef CONFIG_SMP
@@ -327,30 +347,21 @@ static void __init build_mem_type_table(void)
*/
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
}
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
- v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
- protection_map[i] = __pgprot(v);
+ protection_map[i] = __pgprot(v | user_pgprot);
}
- mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
- mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+ mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
- if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
- /*
- * Only use write-through for non-SMP systems
- */
- mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
- } else {
+ if (cpu_arch < CPU_ARCH_ARMv5)
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
- }
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
@@ -398,8 +409,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
- set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
- type->prot_pte_ext);
+ set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
}
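
The MT_DEVICE_IXP2000 slot is replaced by MT_DEVICE_WC, the memory type that backs ioremap_wc() mappings (see the comment added to build_mem_type_table() above). For illustration only, a driver would typically obtain such a write-combined mapping along these lines; the physical address, size and function name below are made up for the example:

	#include <linux/errno.h>
	#include <linux/io.h>

	static void __iomem *fb;

	static int example_map_framebuffer(void)
	{
		/* hypothetical window; ioremap_wc() ends up using the
		 * MT_DEVICE_WC attributes chosen in build_mem_type_table() */
		fb = ioremap_wc(0x80000000, 0x00100000);
		if (!fb)
			return -ENOMEM;

		writel(0, fb);	/* stores may be merged in the write buffer */
		iounmap(fb);
		return 0;
	}
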
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 5673f4d6113..b5551bf010a 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -29,7 +29,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -399,29 +399,7 @@ ENTRY(cpu_arm1020_switch_mm)
.align 5
ENTRY(cpu_arm1020_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 4
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4343fdb0e9e..8bc6740c29e 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -29,7 +29,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -383,29 +383,7 @@ ENTRY(cpu_arm1020e_switch_mm)
.align 5
ENTRY(cpu_arm1020e_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 2a4ea1659e9..2cd03e66c0a 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -365,29 +365,7 @@ ENTRY(cpu_arm1022_switch_mm)
.align 5
ENTRY(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 77a1babd421..ad961a897f6 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -18,7 +18,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -354,29 +354,7 @@ ENTRY(cpu_arm1026_switch_mm)
.align 5
ENTRY(cpu_arm1026_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r1, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index c371fc87776..80d6e1de069 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -15,11 +15,13 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
+#include "proc-macros.S"
+
ENTRY(cpu_arm6_dcache_clean_area)
ENTRY(cpu_arm7_dcache_clean_area)
mov pc, lr
@@ -214,30 +216,13 @@ ENTRY(cpu_arm7_switch_mm)
* : r1 = value to set
* Purpose : Set a PTE and flush it out of any WB cache
*/
- .align 5
+ .align 5
ENTRY(cpu_arm6_set_pte_ext)
ENTRY(cpu_arm7_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
#endif /* CONFIG_MMU */
- mov pc, lr
+ mov pc, lr
/*
* Function: _arm6_7_reset
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index eda733d3045..85ae18695f1 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -36,7 +36,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -93,29 +93,12 @@ ENTRY(cpu_arm720_switch_mm)
* : r1 = value to set
* Purpose : Set a PTE and flush it out of any WB cache
*/
- .align 5
+ .align 5
ENTRY(cpu_arm720_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
#endif
- mov pc, lr
+ mov pc, lr
/*
* Function: arm720_reset
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 3a57376c8bc..4f95bee63e9 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 7b3ecdeb537..93e05fa7bed 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 28cdb060df4..914d688394f 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -28,7 +28,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -351,33 +351,11 @@ ENTRY(cpu_arm920_switch_mm)
.align 5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
-#endif /* CONFIG_MMU */
+#endif
mov pc, lr
__INIT
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 94ddcb4a4b7..51c9c9859e5 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -29,7 +29,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -355,29 +355,7 @@ ENTRY(cpu_arm922_switch_mm)
.align 5
ENTRY(cpu_arm922_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index d045812f339..2724526d89c 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -52,7 +52,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -398,29 +398,7 @@ ENTRY(cpu_arm925_switch_mm)
.align 5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 4cd33169a7c..54466937bff 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -28,7 +28,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -359,29 +359,7 @@ ENTRY(cpu_arm926_switch_mm)
.align 5
ENTRY(cpu_arm926_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext
mov r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 551244d5ca1..f595117caf5 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -11,7 +11,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 6168c6160de..e03f6ff1fb2 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -13,7 +13,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index c85c1f50e39..be6c11d2b3f 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index f2e5884c513..2b8bb383755 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -22,7 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
@@ -446,24 +446,7 @@ ENTRY(cpu_feroceon_switch_mm)
.align 5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b13150052a7..54b1f721dec 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -71,3 +71,173 @@
mov \reg, #16 @ size offset
mov \reg, \reg, lsl \tmp @ actual cache line size
.endm
+
+
+/*
+ * Sanity check the PTE configuration for the code below - which makes
+ * certain assumptions about how these bits are layed out.
+ */
+#if L_PTE_SHARED != PTE_EXT_SHARED
+#error PTE shared bit mismatch
+#endif
+#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
+#error PTE bufferable bit mismatch
+#endif
+#if L_PTE_CACHEABLE != PTE_CACHEABLE
+#error PTE cacheable bit mismatch
+#endif
+#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
+ L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
+#error Invalid Linux PTE bit settings
+#endif
+
+/*
+ * The ARMv6 and ARMv7 set_pte_ext translation function.
+ *
+ * Permission translation:
+ * YUWD APX AP1 AP0 SVC User
+ * 0xxx 0 0 0 no acc no acc
+ * 100x 1 0 1 r/o no acc
+ * 10x0 1 0 1 r/o no acc
+ * 1011 0 0 1 r/w no acc
+ * 110x 0 1 0 r/w r/o
+ * 11x0 0 1 0 r/w r/o
+ * 1111 0 1 1 r/w r/w
+ */
+ .macro armv6_mt_table pfx
+\pfx\()_mt_table:
+ .long 0x00 @ L_PTE_MT_UNCACHED
+ .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE
+ .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
+ .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
+ .long 0x00 @ unused
+ .long 0x00 @ L_PTE_MT_MINICACHE (not present)
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
+ .long 0x00 @ unused
+ .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC
+ .long 0x00 @ unused
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
+ .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+ .endm
+
+ .macro armv6_set_pte_ext pfx
+ str r1, [r0], #-2048 @ linux version
+
+ bic r3, r1, #0x000003fc
+ bic r3, r3, #PTE_TYPE_MASK
+ orr r3, r3, r2
+ orr r3, r3, #PTE_EXT_AP0 | 2
+
+ adr ip, \pfx\()_mt_table
+ and r2, r1, #L_PTE_MT_MASK
+ ldr r2, [ip, r2]
+
+ tst r1, #L_PTE_WRITE
+ tstne r1, #L_PTE_DIRTY
+ orreq r3, r3, #PTE_EXT_APX
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+ tstne r3, #PTE_EXT_APX
+ bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+
+ tst r1, #L_PTE_EXEC
+ orreq r3, r3, #PTE_EXT_XN
+
+ orr r3, r3, r2
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
+ moveq r3, #0
+
+ str r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ .endm
+
+
+/*
+ * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
+ * covering most CPUs except Xscale and Xscale 3.
+ *
+ * Permission translation:
+ * YUWD AP SVC User
+ * 0xxx 0x00 no acc no acc
+ * 100x 0x00 r/o no acc
+ * 10x0 0x00 r/o no acc
+ * 1011 0x55 r/w no acc
+ * 110x 0xaa r/w r/o
+ * 11x0 0xaa r/w r/o
+ * 1111 0xff r/w r/w
+ */
+ .macro armv3_set_pte_ext wc_disable=1
+ str r1, [r0], #-2048 @ linux version
+
+ eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+
+ bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits
+ bic r2, r2, #PTE_TYPE_MASK
+ orr r2, r2, #PTE_TYPE_SMALL
+
+ tst r3, #L_PTE_USER @ user?
+ orrne r2, r2, #PTE_SMALL_AP_URO_SRW
+
+ tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
+ orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
+
+ tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
+ movne r2, #0
+
+ .if \wc_disable
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ tst r2, #PTE_CACHEABLE
+ bicne r2, r2, #PTE_BUFFERABLE
+#endif
+ .endif
+ str r2, [r0] @ hardware version
+ .endm
+
+
+/*
+ * Xscale set_pte_ext translation, split into two halves to cope
+ * with work-arounds. r3 must be preserved by code between these
+ * two macros.
+ *
+ * Permission translation:
+ * YUWD AP SVC User
+ * 0xxx 00 no acc no acc
+ * 100x 00 r/o no acc
+ * 10x0 00 r/o no acc
+ * 1011 01 r/w no acc
+ * 110x 10 r/w r/o
+ * 11x0 10 r/w r/o
+ * 1111 11 r/w r/w
+ */
+ .macro xscale_set_pte_ext_prologue
+ str r1, [r0], #-2048 @ linux version
+
+ eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+
+ bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits
+ orr r2, r2, #PTE_TYPE_EXT @ extended page
+
+ tst r3, #L_PTE_USER @ user?
+ orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
+
+ tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
+ orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
+ @ combined with user -> user r/w
+ .endm
+
+ .macro xscale_set_pte_ext_epilogue
+ tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
+ movne r2, #0 @ no -> fault
+
+ str r2, [r0] @ hardware version
+ mov ip, #0
+ mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
+ mcr p15, 0, ip, c7, c10, 4 @ data write barrier
+ .endm
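
The armv6_mt_table/armv6_set_pte_ext pair above replaces the old per-bit C/B handling: the memory-type field of the Linux PTE is masked out and used directly as the byte offset into a 16-entry table of hardware C/B/TEX encodings (the "ldr r2, [ip, r2]"), which only works because the L_PTE_MT_* values are multiples of 4. A C rendering of the same translation, purely as a sketch (table abbreviated, helper name made up, constants assumed to come from pgtable.h/pgtable-hwdef.h):

	/* sketch of what armv6_set_pte_ext computes for the hardware PTE */
	static const u32 example_mt_table[16] = {
		[0] = 0,				/* L_PTE_MT_UNCACHED     */
		[1] = PTE_EXT_TEX(1),			/* L_PTE_MT_BUFFERABLE   */
		[2] = PTE_CACHEABLE,			/* L_PTE_MT_WRITETHROUGH */
		[3] = PTE_CACHEABLE | PTE_BUFFERABLE,	/* L_PTE_MT_WRITEBACK    */
		/* ... remaining entries as listed in armv6_mt_table ... */
	};

	static u32 example_linux_to_hw(u32 lpte, u32 ext)
	{
		u32 hw = (lpte & ~0x3fcUL & ~PTE_TYPE_MASK) | ext | PTE_EXT_AP0 | 2;

		/* memory type: the masked field doubles as the table byte offset */
		hw |= example_mt_table[(lpte & L_PTE_MT_MASK) >> 2];

		if (!(lpte & L_PTE_WRITE) || !(lpte & L_PTE_DIRTY))
			hw |= PTE_EXT_APX;		/* read-only */
		if (lpte & L_PTE_USER) {
			hw |= PTE_EXT_AP1;
			if (hw & PTE_EXT_APX)		/* user + read-only */
				hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);
		}
		if (!(lpte & L_PTE_EXEC))
			hw |= PTE_EXT_XN;
		if (!(lpte & L_PTE_YOUNG) || !(lpte & L_PTE_PRESENT))
			hw = 0;				/* old/not present -> fault */
		return hw;
	}

The real macro additionally stores the unmodified Linux value 2048 bytes below the hardware entry and cleans the cache line afterwards.
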
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index bbe10576c86..90a7e5279f2 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -17,7 +17,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -153,24 +153,7 @@ ENTRY(cpu_sa110_switch_mm)
.align 5
ENTRY(cpu_sa110_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 871ba018252..451e2d953e2 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -166,24 +166,7 @@ ENTRY(cpu_sa1100_switch_mm)
.align 5
ENTRY(cpu_sa1100_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- bic r2, r1, #PTE_SMALL_AP_MASK
- bic r2, r2, #PTE_TYPE_MASK
- orr r2, r2, #PTE_TYPE_SMALL
-
- tst r1, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_SMALL_AP_URO_SRW
-
- tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
-
- tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0
-
- str r2, [r0] @ hardware version
+ armv3_set_pte_ext wc_disable=0
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 5702ec58b2a..294943b8597 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -13,7 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -114,46 +114,12 @@ ENTRY(cpu_v6_switch_mm)
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
- *
- * Permissions:
- * YUWD APX AP1 AP0 SVC User
- * 0xxx 0 0 0 no acc no acc
- * 100x 1 0 1 r/o no acc
- * 10x0 1 0 1 r/o no acc
- * 1011 0 0 1 r/w no acc
- * 110x 0 1 0 r/w r/o
- * 11x0 0 1 0 r/w r/o
- * 1111 0 1 1 r/w r/w
*/
+ armv6_mt_table cpu_v6
+
ENTRY(cpu_v6_set_pte_ext)
#ifdef CONFIG_MMU
- str r1, [r0], #-2048 @ linux version
-
- bic r3, r1, #0x000003f0
- bic r3, r3, #0x00000003
- orr r3, r3, r2
- orr r3, r3, #PTE_EXT_AP0 | 2
-
- tst r1, #L_PTE_WRITE
- tstne r1, #L_PTE_DIRTY
- orreq r3, r3, #PTE_EXT_APX
-
- tst r1, #L_PTE_USER
- orrne r3, r3, #PTE_EXT_AP1
- tstne r3, #PTE_EXT_APX
- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-
- tst r1, #L_PTE_YOUNG
- biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
-
- tst r1, #L_PTE_EXEC
- orreq r3, r3, #PTE_EXT_XN
-
- tst r1, #L_PTE_PRESENT
- moveq r3, #0
-
- str r3, [r0]
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ armv6_set_pte_ext cpu_v6
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a67e26f3dce..34e42404192 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -12,7 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -105,26 +105,19 @@ ENDPROC(cpu_v7_switch_mm)
* (hardware version is stored at -1024 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
- *
- * Permissions:
- * YUWD APX AP1 AP0 SVC User
- * 0xxx 0 0 0 no acc no acc
- * 100x 1 0 1 r/o no acc
- * 10x0 1 0 1 r/o no acc
- * 1011 0 0 1 r/w no acc
- * 110x 0 1 0 r/w r/o
- * 11x0 0 1 0 r/w r/o
- * 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
bic r3, r1, #0x000003f0
- bic r3, r3, #0x00000003
+ bic r3, r3, #PTE_TYPE_MASK
orr r3, r3, r2
orr r3, r3, #PTE_EXT_AP0 | 2
+ tst r2, #1 << 4
+ orrne r3, r3, #PTE_EXT_TEX(1)
+
tst r1, #L_PTE_WRITE
tstne r1, #L_PTE_DIRTY
orreq r3, r3, #PTE_EXT_APX
@@ -134,13 +127,11 @@ ENTRY(cpu_v7_set_pte_ext)
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
- tst r1, #L_PTE_YOUNG
- biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
-
tst r1, #L_PTE_EXEC
orreq r3, r3, #PTE_EXT_XN
- tst r1, #L_PTE_PRESENT
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
moveq r3, #0
str r3, [r0]
@@ -189,6 +180,10 @@ __v7_setup:
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
#endif
+ ldr r5, =0x40e040e0
+ ldr r6, =0xff0aa1a8
+ mcr p15, 0, r5, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r6, c10, c2, 1 @ write NMRR
adr r5, v7_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0, 0 @ read control register
@@ -205,7 +200,7 @@ ENDPROC(__v7_setup)
*/
.type v7_crval, #object
v7_crval:
- crval clear=0x0120c302, mmuset=0x00c0387d, ucset=0x00c0187c
+ crval clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c
__v7_setup_stack:
.space 4 * 11 @ 11 registers
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 7bd9e7197f6..04dc8b65401 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -27,7 +27,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
@@ -345,38 +345,38 @@ ENTRY(cpu_xsc3_switch_mm)
* cpu_xsc3_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
- *
*/
+cpu_xsc3_mt_table:
+ .long 0x00 @ L_PTE_MT_UNCACHED
+ .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE
+ .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
+ .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
+ .long 0x00 @ unused
+ .long 0x00 @ L_PTE_MT_MINICACHE (not present)
+ .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?)
+ .long 0x00 @ unused
+ .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC
+ .long 0x00 @ unused
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
+ .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+
.align 5
ENTRY(cpu_xsc3_set_pte_ext)
- str r1, [r0], #-2048 @ linux version
+ xscale_set_pte_ext_prologue
- bic r2, r1, #0xff0 @ keep C, B bits
- orr r2, r2, #PTE_TYPE_EXT @ extended page
tst r1, #L_PTE_SHARED @ shared?
- orrne r2, r2, #0x200
-
- eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- tst r3, #L_PTE_USER @ user?
- orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
-
- tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
- orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
- @ combined with user -> user r/w
-
- @ If it's cacheable, it needs to be in L2 also.
- eor ip, r1, #L_PTE_CACHEABLE
- tst ip, #L_PTE_CACHEABLE
- orreq r2, r2, #PTE_EXT_TEX(0x5)
-
- tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
- movne r2, #0 @ no -> fault
-
- str r2, [r0] @ hardware version
- mov ip, #0
- mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
- mcr p15, 0, ip, c7, c10, 4 @ data write barrier
+ and r1, r1, #L_PTE_MT_MASK
+ adr ip, cpu_xsc3_mt_table
+ ldr ip, [ip, r1]
+ orrne r2, r2, #PTE_EXT_COHERENT @ interlock: mask in coherent bit
+ bic r2, r2, #0x0c @ clear old C,B bits
+ orr r2, r2, ip
+
+ xscale_set_pte_ext_epilogue
mov pc, lr
.ltorg
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 2dd85273976..0cce37b9393 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -23,7 +23,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
@@ -406,8 +406,6 @@ ENTRY(cpu_xscale_dcache_clean_area)
/* =============================== PageTable ============================== */
-#define PTE_CACHE_WRITE_ALLOCATE 0
-
/*
* cpu_xscale_switch_mm(pgd)
*
@@ -431,56 +429,42 @@ ENTRY(cpu_xscale_switch_mm)
*
* Errata 40: must set memory to write-through for user read-only pages.
*/
+cpu_xscale_mt_table:
+ .long 0x00 @ L_PTE_MT_UNCACHED
+ .long PTE_BUFFERABLE @ L_PTE_MT_BUFFERABLE
+ .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
+ .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
+ .long 0x00 @ unused
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
+ .long 0x00 @ unused
+ .long PTE_BUFFERABLE @ L_PTE_MT_DEV_WC
+ .long 0x00 @ unused
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
+ .long 0x00 @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+
.align 5
ENTRY(cpu_xscale_set_pte_ext)
- str r1, [r0], #-2048 @ linux version
-
- bic r2, r1, #0xff0
- orr r2, r2, #PTE_TYPE_EXT @ extended page
-
- eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- tst r3, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
-
- tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
- @ combined with user -> user r/w
-
- @
- @ Handle the X bit. We want to set this bit for the minicache
- @ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
- @ and we have a writeable, cacheable region. If we ignore the
- @ U and E bits, we can allow user space to use the minicache as
- @ well.
- @
- @ X = (C & ~W & ~B) | (C & W & B & write_allocate)
- @
- eor ip, r1, #L_PTE_CACHEABLE
- tst ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
-#if PTE_CACHE_WRITE_ALLOCATE
- eorne ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
- tstne ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
-#endif
- orreq r2, r2, #PTE_EXT_TEX(1)
+ xscale_set_pte_ext_prologue
@
- @ Erratum 40: The B bit must be cleared for a user read-only
- @ cacheable page.
- @
- @ B = B & ~(U & C & ~W)
+ @ Erratum 40: must set memory to write-through for user read-only pages
@
- and ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
- teq ip, #L_PTE_USER | L_PTE_CACHEABLE
- biceq r2, r2, #PTE_BUFFERABLE
+ and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
+ teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER
- tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0 @ no -> fault
+ moveq r1, #L_PTE_MT_WRITETHROUGH
+ and r1, r1, #L_PTE_MT_MASK
+ adr ip, cpu_xscale_mt_table
+ ldr ip, [ip, r1]
+ bic r2, r2, #0x0c
+ orr r2, r2, ip
- str r2, [r0] @ hardware version
- mov ip, #0
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ xscale_set_pte_ext_epilogue
mov pc, lr
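
The erratum 40 test above is denser than it looks. Under the layout implied by the byte-offset table lookups (memory-type field at bits 2..5), masking with ~(4 << 2) drops exactly the bit that distinguishes L_PTE_MT_WRITEBACK from L_PTE_MT_WRITEALLOC, so both kinds of user-readable, read-only cacheable pages are downgraded to write-through before the table lookup. A C sketch of the condition, for illustration only:

	/* erratum 40: user-readable, read-only, write-back (or
	 * write-allocate) pages must become write-through */
	unsigned long key = lpte & (L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE);

	key &= ~(4 << 2);		/* WRITEBACK == WRITEALLOC here */
	if (key == (L_PTE_MT_WRITEBACK | L_PTE_USER))
		lpte = (lpte & ~L_PTE_MT_MASK) | L_PTE_MT_WRITETHROUGH;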