| author | Roman Zippel <zippel@linux-m68k.org> | 2005-09-03 15:57:09 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@evo.osdl.org> | 2005-09-05 00:06:19 -0700 |
| commit | 2855b97020f6d4a4dfb005fb77c0b79c8cb9d13f (patch) | |
| tree | 6746ded865a27cb23f1646ea2b9e10f76c9d9601 /arch | |
| parent | 69f447cffb911bb2d9737fa905f6d983ec2aa5d3 (diff) | |
[PATCH] m68k: move cache functions into separate file
Move a few cache functions into their own file and fix flush_icache_range() so
it can handle both kernel and user addresses correctly (assuming the context is
set correctly).
Turn copy_to_user_page/copy_from_user_page into inline functions and add a
missing cache flush.
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
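
The copy_to_user_page()/copy_from_user_page() part of the change lives in the m68k cache headers, which fall outside the 'arch'-limited diffstat below. As a rough sketch only (the header diff is not shown here, so the exact definitions are assumed), the inlined helpers would look roughly like this, using the flush_icache_user_range() that cache.c adds:

```c
/* Sketch, not the header diff itself: assumed shape of the inlined helpers
 * in include/asm-m68k/cacheflush.h after this patch. */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	/* the previously missing flush: make the icache see the new
	 * instructions written into the user page (e.g. by ptrace) */
	flush_icache_user_range(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
```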
Diffstat (limited to 'arch')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/m68k/mm/Makefile | 2 |
| -rw-r--r-- | arch/m68k/mm/cache.c | 118 |
| -rw-r--r-- | arch/m68k/mm/memory.c | 104 |
3 files changed, 119 insertions, 105 deletions
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 90f1c735c11..5eaa43c4cb3 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux m68k-specific parts of the memory manager.
 #
 
-obj-y		:= init.o fault.o hwtest.o
+obj-y		:= cache.o init.o fault.o hwtest.o
 
 obj-$(CONFIG_MMU_MOTOROLA)	+= kmap.o memory.o motorola.o
 obj-$(CONFIG_MMU_SUN3)	+= sun3kmap.o sun3mmu.o
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
new file mode 100644
index 00000000000..5437fff5fe0
--- /dev/null
+++ b/arch/m68k/mm/cache.c
@@ -0,0 +1,118 @@
+/*
+ *  linux/arch/m68k/mm/cache.c
+ *
+ *  Instruction cache handling
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ */
+
+#include <linux/module.h>
+#include <asm/pgalloc.h>
+#include <asm/traps.h>
+
+
+static unsigned long virt_to_phys_slow(unsigned long vaddr)
+{
+	if (CPU_IS_060) {
+		unsigned long paddr;
+
+		/* The PLPAR instruction causes an access error if the translation
+		 * is not possible. To catch this we use the same exception mechanism
+		 * as for user space accesses in <asm/uaccess.h>. */
+		asm volatile (".chip 68060\n"
+			      "1: plpar (%0)\n"
+			      ".chip 68k\n"
+			      "2:\n"
+			      ".section .fixup,\"ax\"\n"
+			      "   .even\n"
+			      "3: sub.l %0,%0\n"
+			      "   jra 2b\n"
+			      ".previous\n"
+			      ".section __ex_table,\"a\"\n"
+			      "   .align 4\n"
+			      "   .long 1b,3b\n"
+			      ".previous"
+			      : "=a" (paddr)
+			      : "0" (vaddr));
+		return paddr;
+	} else if (CPU_IS_040) {
+		unsigned long mmusr;
+
+		asm volatile (".chip 68040\n\t"
+			      "ptestr (%1)\n\t"
+			      "movec %%mmusr, %0\n\t"
+			      ".chip 68k"
+			      : "=r" (mmusr)
+			      : "a" (vaddr));
+
+		if (mmusr & MMU_R_040)
+			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
+	} else {
+		unsigned short mmusr;
+		unsigned long *descaddr;
+
+		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+			      "pmove %%psr,%1@"
+			      : "=a&" (descaddr)
+			      : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));
+		if (mmusr & (MMU_I|MMU_B|MMU_L))
+			return 0;
+		descaddr = phys_to_virt((unsigned long)descaddr);
+		switch (mmusr & MMU_NUM) {
+		case 1:
+			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
+		case 2:
+			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
+		case 3:
+			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
+		}
+	}
+	return 0;
+}
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+void flush_icache_range(unsigned long address, unsigned long endaddr)
+{
+
+	if (CPU_IS_040_OR_060) {
+		address &= PAGE_MASK;
+
+		do {
+			asm volatile ("nop\n\t"
+				      ".chip 68040\n\t"
+				      "cpushp %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (virt_to_phys_slow(address)));
+			address += PAGE_SIZE;
+		} while (address < endaddr);
+	} else {
+		unsigned long tmp;
+		asm volatile ("movec %%cacr,%0\n\t"
+			      "orw %1,%0\n\t"
+			      "movec %0,%%cacr"
+			      : "=&d" (tmp)
+			      : "di" (FLUSH_I));
+	}
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len)
+{
+	if (CPU_IS_040_OR_060) {
+		asm volatile ("nop\n\t"
+			      ".chip 68040\n\t"
+			      "cpushp %%bc,(%0)\n\t"
+			      ".chip 68k"
+			      : : "a" (page_to_phys(page)));
+	} else {
+		unsigned long tmp;
+		asm volatile ("movec %%cacr,%0\n\t"
+			      "orw %1,%0\n\t"
+			      "movec %0,%%cacr"
+			      : "=&d" (tmp)
+			      : "di" (FLUSH_I));
+	}
+}
+
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 1453a601372..559942ce0e1 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -354,110 +354,6 @@ void cache_push (unsigned long paddr, int len)
 #endif
 }
 
-static unsigned long virt_to_phys_slow(unsigned long vaddr)
-{
-	if (CPU_IS_060) {
-		mm_segment_t fs = get_fs();
-		unsigned long paddr;
-
-		set_fs(get_ds());
-
-		/* The PLPAR instruction causes an access error if the translation
-		 * is not possible. To catch this we use the same exception mechanism
-		 * as for user space accesses in <asm/uaccess.h>. */
-		asm volatile (".chip 68060\n"
-			      "1: plpar (%0)\n"
-			      ".chip 68k\n"
-			      "2:\n"
-			      ".section .fixup,\"ax\"\n"
-			      "   .even\n"
-			      "3: sub.l %0,%0\n"
-			      "   jra 2b\n"
-			      ".previous\n"
-			      ".section __ex_table,\"a\"\n"
-			      "   .align 4\n"
-			      "   .long 1b,3b\n"
-			      ".previous"
-			      : "=a" (paddr)
-			      : "0" (vaddr));
-		set_fs(fs);
-		return paddr;
-	} else if (CPU_IS_040) {
-		mm_segment_t fs = get_fs();
-		unsigned long mmusr;
-
-		set_fs(get_ds());
-
-		asm volatile (".chip 68040\n\t"
-			      "ptestr (%1)\n\t"
-			      "movec %%mmusr, %0\n\t"
-			      ".chip 68k"
-			      : "=r" (mmusr)
-			      : "a" (vaddr));
-		set_fs(fs);
-
-		if (mmusr & MMU_R_040)
-			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-	} else {
-		unsigned short mmusr;
-		unsigned long *descaddr;
-
-		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
-			      "pmove %%psr,%1@"
-			      : "=a&" (descaddr)
-			      : "a" (&mmusr), "a" (vaddr));
-		if (mmusr & (MMU_I|MMU_B|MMU_L))
-			return 0;
-		descaddr = phys_to_virt((unsigned long)descaddr);
-		switch (mmusr & MMU_NUM) {
-		case 1:
-			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
-		case 2:
-			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
-		case 3:
-			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
-		}
-	}
-	return 0;
-}
-
-/* Push n pages at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-void flush_icache_range(unsigned long address, unsigned long endaddr)
-{
-	if (CPU_IS_040_OR_060) {
-		address &= PAGE_MASK;
-
-		if (address >= PAGE_OFFSET && address < (unsigned long)high_memory) {
-			do {
-				asm volatile ("nop\n\t"
-					      ".chip 68040\n\t"
-					      "cpushp %%bc,(%0)\n\t"
-					      ".chip 68k"
-					      : : "a" (virt_to_phys((void *)address)));
-				address += PAGE_SIZE;
-			} while (address < endaddr);
-		} else {
-			do {
-				asm volatile ("nop\n\t"
-					      ".chip 68040\n\t"
-					      "cpushp %%bc,(%0)\n\t"
-					      ".chip 68k"
-					      : : "a" (virt_to_phys_slow(address)));
-				address += PAGE_SIZE;
-			} while (address < endaddr);
-		}
-	} else {
-		unsigned long tmp;
-		asm volatile ("movec %%cacr,%0\n\t"
-			      "orw %1,%0\n\t"
-			      "movec %0,%%cacr"
-			      : "=&d" (tmp)
-			      : "di" (FLUSH_I));
-	}
-}
-
-
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
 int mm_end_of_chunk (unsigned long addr, int len)
 {
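
For context on how the exported flush_icache_range() gets used: any code that writes instructions through the data cache and then executes them needs this kind of flush. A hypothetical caller (install_insns() and its arguments are invented for illustration, not part of the patch):

```c
/* Hypothetical example only, not part of this patch. */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static void install_insns(void *dst, const void *insns, size_t len)
{
	/* the write goes through the data cache ... */
	memcpy(dst, insns, len);
	/* ... so push it out and invalidate the icache before executing:
	 * on 68040/68060 this flushes page by page with cpushp, on older
	 * CPUs it simply sets FLUSH_I in the cacr */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
```

On 68040/68060 the cpushp flush takes physical addresses, which is why the new flush_icache_range() translates every page with virt_to_phys_slow() instead of assuming a kernel linear mapping; that is what lets it handle user addresses as well, given the right context, as the changelog notes.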