author     Russell King <rmk@dyn-67.arm.linux.org.uk>      2008-09-06 20:04:59 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>      2008-10-01 16:40:56 +0100
commit     bb30f36f9b71c31dc8fe3483bba4c9884fc86080 (patch)
tree       c99b583586ebec2a29be2b0173d1eb9ad07a68f9 /arch/arm/mm/mmu.c
parent     9cff96e5bfc8e366166bfb07610604c7604ac48c (diff)
[ARM] Introduce new PTE memory type bits
Provide L_PTE_MT_xxx definitions to describe the memory types that we
use in Linux/ARM.  These definitions are carefully picked such that:

1. their LSBs match what is required for pre-ARMv6 CPUs.
2. they all have a unique encoding, including after modification by
   build_mem_type_table() (the result being that some memory types
   end up with more than one bit combination.)

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
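For reference, the L_PTE_MT_xxx values themselves live in the pgtable
headers, which this diff (limited to arch/arm/mm/mmu.c) does not show.
A minimal sketch of an encoding satisfying the two constraints above
(illustrative values only, assuming a 4-bit field at PTE bits 2..5
whose two LSBs line up with the classic Bufferable/Cacheable bits):

    /* Illustrative sketch -- not taken from this commit's headers. */
    #define L_PTE_MT_UNCACHED      (0x00 << 2)    /* ..00: B=0 C=0 */
    #define L_PTE_MT_BUFFERABLE    (0x01 << 2)    /* ..01: B=1 C=0 */
    #define L_PTE_MT_WRITETHROUGH  (0x02 << 2)    /* ..10: B=0 C=1 */
    #define L_PTE_MT_WRITEBACK     (0x03 << 2)    /* ..11: B=1 C=1 */
    #define L_PTE_MT_WRITEALLOC    (0x07 << 2)    /* unique upper bits */
    #define L_PTE_MT_MASK          (0x0f << 2)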
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  62
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a713e40e1f1..cfc0add4874 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -68,27 +68,27 @@ static struct cachepolicy cache_policies[] __initdata = {
.policy = "uncached",
.cr_mask = CR_W|CR_C,
.pmd = PMD_SECT_UNCACHED,
- .pte = 0,
+ .pte = L_PTE_MT_UNCACHED,
}, {
.policy = "buffered",
.cr_mask = CR_C,
.pmd = PMD_SECT_BUFFERED,
- .pte = PTE_BUFFERABLE,
+ .pte = L_PTE_MT_BUFFERABLE,
}, {
.policy = "writethrough",
.cr_mask = 0,
.pmd = PMD_SECT_WT,
- .pte = PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITETHROUGH,
}, {
.policy = "writeback",
.cr_mask = 0,
.pmd = PMD_SECT_WB,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEBACK,
}, {
.policy = "writealloc",
.cr_mask = 0,
.pmd = PMD_SECT_WBWA,
- .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
+ .pte = L_PTE_MT_WRITEALLOC,
}
};
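The table above is indexed by the global cachepolicy variable, chosen
early from the kernel command line.  A rough sketch of that selection
(hypothetical helper name, modeled on mmu.c's "cachepolicy=" early
parameter handling, which this diff does not touch):

    /* Sketch: select a cache_policies[] entry by name, as the
     * "cachepolicy=" early parameter does.  Hypothetical helper. */
    static void __init pick_cachepolicy(const char *name)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
                    if (strcmp(name, cache_policies[i].policy) == 0) {
                            /* index used later by build_mem_type_table() */
                            cachepolicy = i;
                            break;
                    }
    }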
@@ -186,35 +186,36 @@ void adjust_cr(unsigned long mask, unsigned long set)
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_pte_ext = PTE_EXT_TEX(2),
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cached */
- .prot_pte = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
.domain = DOMAIN_IO,
},
[MT_DEVICE_IXP2000] = { /* IXP2400 requires XCB=101 for on-chip I/O */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_IXP2000,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
PMD_SECT_TEX(1),
.domain = DOMAIN_IO,
},
[MT_DEVICE_WC] = { /* ioremap_wc */
- .prot_pte = PROT_PTE_DEVICE,
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
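mem_types[] is consulted whenever the kernel creates a mapping; mmu.c
exposes it through a small bounds-checked accessor along these lines
(sketch of the surrounding file, not part of this diff):

    /* Mirrors mmu.c's get_mem_type(); sketch only. */
    const struct mem_type *get_mem_type(unsigned int type)
    {
            return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
    }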
@@ -259,7 +260,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot;
+ unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -277,6 +278,9 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
+#ifdef CONFIG_SMP
+ cachepolicy = CPOLICY_WRITEALLOC;
+#endif
/*
* On non-Xscale3 ARMv5-and-older systems, use CB=01
@@ -286,10 +290,9 @@ static void __init build_mem_type_table(void)
*/
if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
+ mem_types[MT_DEVICE_WC].prot_pte &= ~L_PTE_BUFFERABLE;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
- } else {
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
}
/*
@@ -312,7 +315,15 @@ static void __init build_mem_type_table(void)
}
cp = &cache_policies[cachepolicy];
- kern_pgprot = user_pgprot = cp->pte;
+ vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+ /*
+ * Only use write-through for non-SMP systems
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
/*
* Enable CPU-specific coherency if supported.
@@ -349,30 +360,21 @@ static void __init build_mem_type_table(void)
*/
user_pgprot |= L_PTE_SHARED;
kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
}
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
- v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
- protection_map[i] = __pgprot(v);
+ protection_map[i] = __pgprot(v | user_pgprot);
}
- mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
- mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+ mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
- if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
- /*
- * Only use write-through for non-SMP systems
- */
- mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
- } else {
+ if (cpu_arch < CPU_ARCH_ARMv5)
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
- }
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |