author		Paul Mundt <lethal@linux-sh.org>	2009-03-17 17:49:49 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2009-03-17 17:49:49 +0900
commit		8263a67e169fdf0d06d172acbf6c03ae172a69d4 (patch)
tree		cdfefd2d72c7854101287a9e39e3ad97cad6cb5b /arch/sh/include/asm/mmu_context_32.h
parent		da78800632197ac12adcdefbf09991d82adb8201 (diff)
sh: Support for extended ASIDs on PTEAEX-capable SH-X3 cores.
This adds support for extended ASIDs (up to 16 bits) on newer SH-X3 cores that implement the PTEAEX register and its associated functionality. Presently this is only the 65nm SH7786 (the 90nm part only supports legacy 8-bit ASIDs).

The main changes are in how the PTE is written out when loading the entry into the TLB, and in how a TLB entry is selectively flushed. While SH-X2 extended mode splits out the memory-mapped U and I-TLB data arrays for the extra bits, extended ASID mode splits out the address arrays. We don't use the memory-mapped data array access, but the address array accesses are necessary for selective TLB flushes, so these are implemented anew and replace the generic SH-4 implementation.

With this, TLB flushes in switch_mm() are almost non-existent on newer parts.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
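To illustrate why a wider ASID space makes the flush path so rare, below is a minimal, self-contained C sketch of a generation-based ASID allocator in the same spirit as the sh context code. It is illustrative only, not kernel code: names such as get_asid_for(), NR_MMS and the round-robin workload are made up for the simulation, and ASID_BITS stands in for the hardware field width (16 with PTEAEX, 8 on legacy parts).

/*
 * Stand-alone sketch (not kernel code): a generation-based ASID allocator.
 * Each context stores "version | asid"; when the ASID space wraps, the whole
 * TLB must be flushed and a new generation started.  Build with ASID_BITS set
 * to 8 vs 16 to compare how often that flush happens.
 */
#include <stdio.h>

#define ASID_BITS	16			/* 8 on legacy parts */
#define ASID_MASK	((1UL << ASID_BITS) - 1)
#define FIRST_VERSION	(ASID_MASK + 1)		/* version field sits above the ASID bits */

static unsigned long asid_cache = FIRST_VERSION;	/* last allocated "version | asid" */
static unsigned long full_flushes;			/* simulated whole-TLB flushes */

/* Return a valid hardware ASID for *ctx, allocating a fresh one if it is stale. */
static unsigned long get_asid_for(unsigned long *ctx)
{
	unsigned long asid = asid_cache;

	/* Same generation as the cache?  The old ASID is still valid. */
	if (((*ctx ^ asid) & ~ASID_MASK) == 0)
		return *ctx & ASID_MASK;

	asid++;
	if ((asid & ASID_MASK) == 0) {
		/* ASID space exhausted: flush everything, open a new generation. */
		full_flushes++;
		if (asid == 0)			/* the version counter itself wrapped */
			asid = FIRST_VERSION;
	}
	*ctx = asid_cache = asid;
	return asid & ASID_MASK;
}

int main(void)
{
	enum { NR_MMS = 1000, SWITCHES = 1000000 };
	static unsigned long ctx[NR_MMS];	/* 0 == no context assigned yet */
	unsigned int i;

	/* Round-robin context switches over more mms than an 8-bit ASID can hold. */
	for (i = 0; i < SWITCHES; i++)
		get_asid_for(&ctx[i % NR_MMS]);

	printf("%d-bit ASIDs: %lu full TLB flushes in %d switches\n",
	       ASID_BITS, full_flushes, (int)SWITCHES);
	return 0;
}

With ASID_BITS at 8, this workload triggers a full flush about every 256 context switches; with 16 bits, none occur after the initial warm-up pass. That is the effect the message above refers to when it says switch_mm() flushes become almost non-existent.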
Diffstat (limited to 'arch/sh/include/asm/mmu_context_32.h')
-rw-r--r--	arch/sh/include/asm/mmu_context_32.h	12
1 file changed, 12 insertions, 0 deletions
diff --git a/arch/sh/include/asm/mmu_context_32.h b/arch/sh/include/asm/mmu_context_32.h
index f4f9aebd68b..8ef800c549a 100644
--- a/arch/sh/include/asm/mmu_context_32.h
+++ b/arch/sh/include/asm/mmu_context_32.h
@@ -10,6 +10,17 @@ static inline void destroy_context(struct mm_struct *mm)
 	/* Do nothing */
 }
 
+#ifdef CONFIG_CPU_HAS_PTEAEX
+static inline void set_asid(unsigned long asid)
+{
+	__raw_writel(asid, MMU_PTEAEX);
+}
+
+static inline unsigned long get_asid(void)
+{
+	return __raw_readl(MMU_PTEAEX) & MMU_CONTEXT_ASID_MASK;
+}
+#else
 static inline void set_asid(unsigned long asid)
 {
 	unsigned long __dummy;
@@ -33,6 +44,7 @@ static inline unsigned long get_asid(void)
 	asid &= MMU_CONTEXT_ASID_MASK;
 	return asid;
 }
+#endif /* CONFIG_CPU_HAS_PTEAEX */
 
 /* MMU_TTB is used for optimizing the fault handling. */
 static inline void set_TTB(pgd_t *pgd)