-rw-r--r--  arch/sh/include/asm/cacheflush.h             | 37
-rw-r--r--  arch/sh/include/cpu-common/cpu/cacheflush.h  | 37
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/cacheflush.h     | 31
-rw-r--r--  arch/sh/include/cpu-sh5/cpu/cacheflush.h     | 10
-rw-r--r--  arch/sh/mm/cache-sh4.c                       | 87
-rw-r--r--  arch/sh/mm/cache-sh5.c                       |  4
-rw-r--r--  arch/sh/mm/cache.c                           | 70
-rw-r--r--  arch/sh/mm/flush-sh4.c                       | 13
-rw-r--r--  arch/sh/mm/init.c                            |  5
9 files changed, 159 insertions(+), 135 deletions(-)
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index b1cf30f423a..25b7f46494d 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -1,46 +1,11 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
-#include <linux/mm.h>
-
 #ifdef __KERNEL__
 
-#ifdef CONFIG_CACHE_OFF
-/*
- * Nothing to do when the cache is disabled, initial flush and explicit
- * disabling is handled at CPU init time.
- *
- * See arch/sh/kernel/cpu/init.c:cache_init().
- */
-#define flush_cache_all()                      do { } while (0)
-#define flush_cache_mm(mm)                     do { } while (0)
-#define flush_cache_dup_mm(mm)                 do { } while (0)
-#define flush_cache_range(vma, start, end)     do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
-#define flush_dcache_page(page)                do { } while (0)
-#define flush_icache_range(start, end)         do { } while (0)
-#define flush_icache_page(vma,pg)              do { } while (0)
-#define flush_cache_sigtramp(vaddr)            do { } while (0)
-#define __flush_wback_region(start, size)      do { (void)(start); } while (0)
-#define __flush_purge_region(start, size)      do { (void)(start); } while (0)
-#define __flush_invalidate_region(start, size) do { (void)(start); } while (0)
-#else
+#include <linux/mm.h>
 #include <cpu/cacheflush.h>
 
-/*
- * Consistent DMA requires that the __flush_xxx() primitives must be set
- * for any of the enabled non-coherent caches (most of the UP CPUs),
- * regardless of PIPT or VIPT cache configurations.
- */
-
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-#endif
-
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);
 
diff --git a/arch/sh/include/cpu-common/cpu/cacheflush.h b/arch/sh/include/cpu-common/cpu/cacheflush.h
index 5dc3736218e..8189dbd68f8 100644
--- a/arch/sh/include/cpu-common/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-common/cpu/cacheflush.h
@@ -1,14 +1,12 @@
 /*
- * include/asm-sh/cpu-sh2/cacheflush.h
- *
  * Copyright (C) 2003 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
-#ifndef __ASM_CPU_SH2_CACHEFLUSH_H
-#define __ASM_CPU_SH2_CACHEFLUSH_H
+#ifndef __ASM_CPU_SH_CACHEFLUSH_H
+#define __ASM_CPU_SH_CACHEFLUSH_H
 
 /*
  * Cache flushing:
@@ -22,18 +20,23 @@
  *  - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
  *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
  *  - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
- *
- * Caches are indexed (effectively) by physical address on SH-2, so
- * we don't need them.
+ *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  */
-#define flush_cache_all()                      do { } while (0)
-#define flush_cache_mm(mm)                     do { } while (0)
-#define flush_cache_dup_mm(mm)                 do { } while (0)
-#define flush_cache_range(vma, start, end)     do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
-#define flush_dcache_page(page)                do { } while (0)
-#define flush_icache_range(start, end)         do { } while (0)
-#define flush_icache_page(vma,pg)              do { } while (0)
-#define flush_cache_sigtramp(vaddr)            do { } while (0)
+extern void (*flush_cache_all)(void);
+extern void (*flush_cache_mm)(struct mm_struct *mm);
+extern void (*flush_cache_dup_mm)(struct mm_struct *mm);
+extern void (*flush_cache_page)(struct vm_area_struct *vma,
+                                unsigned long addr, unsigned long pfn);
+extern void (*flush_cache_range)(struct vm_area_struct *vma,
+                                 unsigned long start, unsigned long end);
+extern void (*flush_dcache_page)(struct page *page);
+extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*flush_icache_page)(struct vm_area_struct *vma,
+                                 struct page *page);
+extern void (*flush_cache_sigtramp)(unsigned long address);
+
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
 
-#endif /* __ASM_CPU_SH2_CACHEFLUSH_H */
+#endif /* __ASM_CPU_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
deleted file mode 100644
index d6bd396d7df..00000000000
--- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * include/asm-sh/cpu-sh4/cacheflush.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
-#define __ASM_CPU_SH4_CACHEFLUSH_H
-
-/*
- * Caches are broken on SH-4 (unless we use write-through
- * caching; in which case they're only semi-broken),
- * so we need them.
- */
-void flush_cache_all(void);
-void flush_cache_mm(struct mm_struct *mm);
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-                       unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
-                      unsigned long pfn);
-void flush_dcache_page(struct page *pg);
-void flush_icache_range(unsigned long start, unsigned long end);
-
-#define flush_icache_page(vma,pg) do { } while (0)
-
-#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/cacheflush.h b/arch/sh/include/cpu-sh5/cpu/cacheflush.h
index 740d10a316e..202f637a0e1 100644
--- a/arch/sh/include/cpu-sh5/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh5/cpu/cacheflush.h
@@ -3,10 +3,6 @@
 
 #ifndef __ASSEMBLY__
 
-struct vm_area_struct;
-struct page;
-struct mm_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_sigtramp(unsigned long vaddr);
@@ -16,10 +12,14 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, uns
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 
+/* XXX .. */
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
+
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 #define flush_icache_page(vma, page) do { } while (0)
 
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */
-
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b5860535e61..05cb04bc394 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,13 +26,6 @@
 #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES 32
 
-static void __flush_dcache_segment_1way(unsigned long start,
-                                        unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-                                        unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-                                        unsigned long extent);
-
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                                unsigned long exec_offset);
 
@@ -45,38 +38,12 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
         (void (*)(unsigned long, unsigned long))0xdeadbeef;
 
 /*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init sh4_cache_init(void)
-{
-        printk("PVR=%08x CVR=%08x PRR=%08x\n",
-                ctrl_inl(CCN_PVR),
-                ctrl_inl(CCN_CVR),
-                ctrl_inl(CCN_PRR));
-
-        switch (boot_cpu_data.dcache.ways) {
-        case 1:
-                __flush_dcache_segment_fn = __flush_dcache_segment_1way;
-                break;
-        case 2:
-                __flush_dcache_segment_fn = __flush_dcache_segment_2way;
-                break;
-        case 4:
-                __flush_dcache_segment_fn = __flush_dcache_segment_4way;
-                break;
-        default:
-                panic("unknown number of cache ways\n");
-                break;
-        }
-}
-
-/*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(unsigned long start, unsigned long end)
 {
         int icacheaddr;
         unsigned long flags, v;
@@ -137,7 +104,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(struct page *page)
 {
         struct address_space *mapping = page_mapping(page);
 
@@ -188,7 +155,7 @@ static inline void flush_dcache_all(void)
         wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void)
 {
         flush_dcache_all();
         flush_icache_all();
@@ -280,7 +247,7 @@ loop_exit:
  *
 * Caller takes mm->mmap_sem.
 */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(struct mm_struct *mm)
 {
         if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                 return;
@@ -320,8 +287,8 @@ void flush_cache_mm(struct mm_struct *mm)
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-                      unsigned long pfn)
+static void sh4_flush_cache_page(struct vm_area_struct *vma,
+                                 unsigned long address, unsigned long pfn)
 {
         unsigned long phys = pfn << PAGE_SHIFT;
         unsigned int alias_mask;
@@ -368,8 +335,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-                       unsigned long end)
+static void sh4_flush_cache_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
 {
         if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                 return;
@@ -668,3 +635,41 @@ static void __flush_dcache_segment_4way(unsigned long start,
                 a3 += linesz;
         } while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+        printk("PVR=%08x CVR=%08x PRR=%08x\n",
+                ctrl_inl(CCN_PVR),
+                ctrl_inl(CCN_CVR),
+                ctrl_inl(CCN_PRR));
+
+        switch (boot_cpu_data.dcache.ways) {
+        case 1:
+                __flush_dcache_segment_fn = __flush_dcache_segment_1way;
+                break;
+        case 2:
+                __flush_dcache_segment_fn = __flush_dcache_segment_2way;
+                break;
+        case 4:
+                __flush_dcache_segment_fn = __flush_dcache_segment_4way;
+                break;
+        default:
+                panic("unknown number of cache ways\n");
+                break;
+        }
+
+        flush_icache_range = sh4_flush_icache_range;
+        flush_dcache_page = sh4_flush_dcache_page;
+        flush_cache_all = sh4_flush_cache_all;
+        flush_cache_mm = sh4_flush_cache_mm;
+        flush_cache_dup_mm = sh4_flush_cache_mm;
+        flush_cache_page = sh4_flush_cache_page;
+        flush_cache_range = sh4_flush_cache_range;
+
+        sh4__flush_region_init();
+}
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index a50d23caf01..a8f5142dc2c 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -20,6 +20,8 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 
+extern void __weak sh4__flush_region_init(void);
+
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
@@ -27,6 +29,8 @@ void __init cpu_cache_init(void)
 {
         /* Reserve a slot for dcache colouring in the DTLB */
         dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+        sh4__flush_region_init();
 }
 
 void __init kmap_coherent_init(void)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a31e5c46e7a..da5bc6ac1b2 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -15,6 +15,62 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_dup_mm)(struct mm_struct *mm);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+                         unsigned long addr, unsigned long pfn);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+                          unsigned long start, unsigned long end);
+void (*flush_dcache_page)(struct page *page);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_icache_page)(struct vm_area_struct *vma,
+                          struct page *page);
+void (*flush_cache_sigtramp)(unsigned long address);
+void (*__flush_wback_region)(void *start, int size);
+void (*__flush_purge_region)(void *start, int size);
+void (*__flush_invalidate_region)(void *start, int size);
+
+static inline void noop_flush_cache_all(void)
+{
+}
+
+static inline void noop_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void noop_flush_cache_page(struct vm_area_struct *vma,
+                                         unsigned long addr, unsigned long pfn)
+{
+}
+
+static inline void noop_flush_cache_range(struct vm_area_struct *vma,
+                                          unsigned long start, unsigned long end)
+{
+}
+
+static inline void noop_flush_dcache_page(struct page *page)
+{
+}
+
+static inline void noop_flush_icache_range(unsigned long start,
+                                           unsigned long end)
+{
+}
+
+static inline void noop_flush_icache_page(struct vm_area_struct *vma,
+                                          struct page *page)
+{
+}
+
+static inline void noop_flush_cache_sigtramp(unsigned long address)
+{
+}
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, const void *src,
                        unsigned long len)
@@ -174,6 +230,20 @@ void __init cpu_cache_init(void)
         compute_alias(&boot_cpu_data.dcache);
         compute_alias(&boot_cpu_data.scache);
 
+        flush_cache_all = noop_flush_cache_all;
+        flush_cache_mm = noop_flush_cache_mm;
+        flush_cache_dup_mm = noop_flush_cache_mm;
+        flush_cache_page = noop_flush_cache_page;
+        flush_cache_range = noop_flush_cache_range;
+        flush_dcache_page = noop_flush_dcache_page;
+        flush_icache_range = noop_flush_icache_range;
+        flush_icache_page = noop_flush_icache_page;
+        flush_cache_sigtramp = noop_flush_cache_sigtramp;
+
+        __flush_wback_region = noop__flush_region;
+        __flush_purge_region = noop__flush_region;
+        __flush_invalidate_region = noop__flush_region;
+
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 1b6b6a12a99..99c50dc7551 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -8,7 +8,7 @@
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
-void __weak __flush_wback_region(void *start, int size)
+static void sh4__flush_wback_region(void *start, int size)
 {
         reg_size_t aligned_start, v, cnt, end;
 
@@ -51,7 +51,7 @@ void __weak __flush_wback_region(void *start, int size)
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
-void __weak __flush_purge_region(void *start, int size)
+static void sh4__flush_purge_region(void *start, int size)
 {
         reg_size_t aligned_start, v, cnt, end;
 
@@ -90,7 +90,7 @@ void __weak __flush_purge_region(void *start, int size)
 /*
 * No write back please
 */
-void __weak __flush_invalidate_region(void *start, int size)
+static void sh4__flush_invalidate_region(void *start, int size)
 {
         reg_size_t aligned_start, v, cnt, end;
 
@@ -126,3 +126,10 @@ void __weak __flush_invalidate_region(void *start, int size)
                 cnt--;
         }
 }
+
+void __init sh4__flush_region_init(void)
+{
+        __flush_wback_region = sh4__flush_wback_region;
+        __flush_invalidate_region = sh4__flush_invalidate_region;
+        __flush_purge_region = sh4__flush_purge_region;
+}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index cf0e9c5146b..0a9b4d855bc 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -210,6 +210,9 @@ void __init mem_init(void)
                 high_memory = node_high_memory;
         }
 
+        /* Set this up early, so we can take care of the zero page */
+        cpu_cache_init();
+
         /* clear the zero-page */
         memset(empty_zero_page, 0, PAGE_SIZE);
         __flush_wback_region(empty_zero_page, PAGE_SIZE);
@@ -230,8 +233,6 @@ void __init mem_init(void)
                datasize >> 10,
                initsize >> 10);
 
-        cpu_cache_init();
-
         /* Initialize the vDSO */
         vsyscall_init();
 }
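
The patch above replaces the per-CPU compile-time selection of the cacheflush primitives with function pointers filled in at boot: cpu_cache_init() installs the noop_*() defaults first, and the CPU-family init (sh4_cache_init(), sh4__flush_region_init()) then overrides the pointers it actually implements. Below is a minimal standalone C sketch of that dispatch pattern, not code from the patch; the demo_* names and the have_sh4_ops flag are illustrative assumptions.

/*
 * Sketch of the boot-time dispatch pattern: install no-op defaults,
 * then let the detected CPU family override the hooks it supports.
 */
#include <stdio.h>

/* Function-pointer hooks, analogous to flush_icache_range et al. */
static void (*flush_icache_range_hook)(unsigned long start, unsigned long end);
static void (*flush_dcache_page_hook)(unsigned long pfn);

/* Default no-op implementations, analogous to the noop_flush_*() helpers. */
static void noop_flush_icache_range(unsigned long start, unsigned long end) { }
static void noop_flush_dcache_page(unsigned long pfn) { }

/* CPU-specific implementation, analogous to sh4_flush_icache_range(). */
static void demo_flush_icache_range(unsigned long start, unsigned long end)
{
        printf("flush icache %#lx-%#lx\n", start, end);
}

static void demo_cache_init(int have_sh4_ops)
{
        /* Install safe defaults first... */
        flush_icache_range_hook = noop_flush_icache_range;
        flush_dcache_page_hook = noop_flush_dcache_page;

        /* ...then override only the hooks this CPU family provides. */
        if (have_sh4_ops)
                flush_icache_range_hook = demo_flush_icache_range;
}

int main(void)
{
        demo_cache_init(1);
        flush_icache_range_hook(0x1000, 0x2000); /* dispatches to the demo hook */
        flush_dcache_page_hook(42);              /* still a no-op */
        return 0;
}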