author		Paul Mundt <lethal@linux-sh.org>	2009-08-15 02:00:54 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-15 02:00:54 +0900
commit		795687265d1b6f666d02ff56f6c1679a8db160a9 (patch)
tree		9e0de63a1299b24a8d6d1d58d246fb3d7a476926 /arch
parent		43bc61d86f8ea6edef2e02d1dc47617883fa9a9c (diff)
sh64: Wire up the shared __flush_xxx_region() flushers.
Now with all of the prep work out of the way, kill off the SH-5 variants
and use the SH-4 version directly. This also takes advantage of the
unrolling that was previously done for the new version.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
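For context, the shared flushers this commit switches SH-5 over to live in
arch/sh/mm/flush-sh4.c. Below is a minimal sketch of what an unrolled purge
flusher of that style looks like; it assumes SH-4 "ocbp @Rn" operand syntax
and eight-line unrolling for illustration, and is not the verbatim in-tree
code.

#include <linux/cache.h>	/* L1_CACHE_BYTES */

/*
 * Sketch of an SH-4 style __flush_purge_region(): walk the region one
 * cache line at a time, unrolled eight lines per loop iteration.  The
 * real flush-sh4.c implementation may differ in detail.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v, end, cnt;

	/* Round the start down and the end up to cache line boundaries. */
	v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES - 1) &
	      ~(L1_CACHE_BYTES - 1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* Unrolled body: purge (write back + invalidate) 8 lines per pass. */
	while (cnt >= 8) {
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		__asm__ __volatile__("ocbp @%0" : : "r" (v)); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	/* Tail: handle any remaining lines one at a time. */
	while (cnt--) {
		__asm__ __volatile__("ocbp @%0" : : "r" (v));
		v += L1_CACHE_BYTES;
	}
}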
Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/mm/Makefile_64	|  2 +-
-rw-r--r--	arch/sh/mm/cache-sh5.c	| 48 --------------------------------------------
2 files changed, 1 insertion(+), 49 deletions(-)
diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64
index 2863ffb7006..66c39106d0a 100644
--- a/arch/sh/mm/Makefile_64
+++ b/arch/sh/mm/Makefile_64
@@ -9,7 +9,7 @@ mmu-$(CONFIG_MMU)	:= fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
 			   extable_64.o
 
 ifndef CONFIG_CACHE_OFF
-obj-y			+= cache-sh5.o
+obj-y			+= cache-sh5.o flush-sh4.o
 endif
 
 obj-y			+= $(mmu-y)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 3e2d7321b63..698113fce81 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -539,54 +539,6 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 		sh64_dcache_purge_user_pages(mm, start, end);
 	}
 }
-
-/*
- * Purge the range of addresses from the D-cache.
- *
- * The addresses lie in the superpage mapping. There's no harm if we
- * overpurge at either end - just a small performance loss.
- */
-void __flush_purge_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr <= ullend) {
-		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_wback_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_invalidate_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
 #endif /* !CONFIG_DCACHE_DISABLED */
 
 /*