Diffstat (limited to 'arch/sh/mm'):

 -rw-r--r--  arch/sh/mm/Kconfig        |  67
 -rw-r--r--  arch/sh/mm/Makefile       |  23
 -rw-r--r--  arch/sh/mm/cache-sh4.c    |  81
 -rw-r--r--  arch/sh/mm/copy_page.S    | 169
 -rw-r--r--  arch/sh/mm/fault-nommu.c  |  64
 -rw-r--r--  arch/sh/mm/fault.c        |   2
 -rw-r--r--  arch/sh/mm/init.c         |   6
 -rw-r--r--  arch/sh/mm/pmb.c          |   2
 -rw-r--r--  arch/sh/mm/tlb-sh4.c      |  55

 9 files changed, 238 insertions(+), 231 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 43f3972a5fb..cf446bbab5b 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -2,7 +2,6 @@
# Processor families
#
config CPU_SH2
- select SH_WRITETHROUGH if !CPU_SH2A
bool
config CPU_SH2A
@@ -19,6 +18,7 @@ config CPU_SH4
select CPU_HAS_INTEVT
select CPU_HAS_SR_RB
select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
+ select CPU_HAS_FPU if !CPU_SH4AL_DSP
config CPU_SH4A
bool
@@ -32,7 +32,6 @@ config CPU_SH4AL_DSP
config CPU_SUBTYPE_ST40
bool
select CPU_SH4
- select CPU_HAS_INTC2_IRQ
config CPU_SHX2
bool
@@ -52,26 +51,22 @@ choice
config CPU_SUBTYPE_SH7619
bool "Support SH7619 processor"
select CPU_SH2
- select CPU_HAS_IPR_IRQ
# SH-2A Processor Support
config CPU_SUBTYPE_SH7206
bool "Support SH7206 processor"
select CPU_SH2A
- select CPU_HAS_IPR_IRQ
# SH-3 Processor Support
config CPU_SUBTYPE_SH7705
bool "Support SH7705 processor"
select CPU_SH3
- select CPU_HAS_IPR_IRQ
config CPU_SUBTYPE_SH7706
bool "Support SH7706 processor"
select CPU_SH3
- select CPU_HAS_IPR_IRQ
help
Select SH7706 if you have a 133 MHz SH-3 HD6417706 CPU.
@@ -91,14 +86,12 @@ config CPU_SUBTYPE_SH7708
config CPU_SUBTYPE_SH7709
bool "Support SH7709 processor"
select CPU_SH3
- select CPU_HAS_IPR_IRQ
help
Select SH7709 if you have an 80 MHz SH-3 HD6417709 CPU.
config CPU_SUBTYPE_SH7710
bool "Support SH7710 processor"
select CPU_SH3
- select CPU_HAS_IPR_IRQ
select CPU_HAS_DSP
help
Select SH7710 if you have a SH3-DSP SH7710 CPU.
@@ -106,24 +99,28 @@ config CPU_SUBTYPE_SH7710
config CPU_SUBTYPE_SH7712
bool "Support SH7712 processor"
select CPU_SH3
- select CPU_HAS_IPR_IRQ
select CPU_HAS_DSP
help
Select SH7712 if you have a SH3-DSP SH7712 CPU.
+config CPU_SUBTYPE_SH7720
+ bool "Support SH7720 processor"
+ select CPU_SH3
+ select CPU_HAS_DSP
+ help
+ Select SH7720 if you have a SH3-DSP SH7720 CPU.
+
# SH-4 Processor Support
config CPU_SUBTYPE_SH7750
bool "Support SH7750 processor"
select CPU_SH4
- select CPU_HAS_INTC_IRQ
help
Select SH7750 if you have a 200 MHz SH-4 HD6417750 CPU.
config CPU_SUBTYPE_SH7091
bool "Support SH7091 processor"
select CPU_SH4
- select CPU_HAS_INTC_IRQ
help
Select SH7091 if you have an SH-4 based Sega device (such as
the Dreamcast, Naomi, and Naomi 2).
@@ -131,17 +128,14 @@ config CPU_SUBTYPE_SH7091
config CPU_SUBTYPE_SH7750R
bool "Support SH7750R processor"
select CPU_SH4
- select CPU_HAS_INTC_IRQ
config CPU_SUBTYPE_SH7750S
bool "Support SH7750S processor"
select CPU_SH4
- select CPU_HAS_INTC_IRQ
config CPU_SUBTYPE_SH7751
bool "Support SH7751 processor"
select CPU_SH4
- select CPU_HAS_INTC_IRQ
help
Select SH7751 if you have a 166 MHz SH-4 HD6417751 CPU,
or if you have a HD6417751R CPU.
@@ -149,13 +143,10 @@ config CPU_SUBTYPE_SH7751
config CPU_SUBTYPE_SH7751R
bool "Support SH7751R processor"
select CPU_SH4
- select CPU_HAS_INTC_IRQ
config CPU_SUBTYPE_SH7760
bool "Support SH7760 processor"
select CPU_SH4
- select CPU_HAS_INTC2_IRQ
- select CPU_HAS_IPR_IRQ
config CPU_SUBTYPE_SH4_202
bool "Support SH4-202 processor"
@@ -185,19 +176,21 @@ config CPU_SUBTYPE_SH7770
config CPU_SUBTYPE_SH7780
bool "Support SH7780 processor"
select CPU_SH4A
- select CPU_HAS_INTC_IRQ
config CPU_SUBTYPE_SH7785
bool "Support SH7785 processor"
select CPU_SH4A
select CPU_SHX2
- select CPU_HAS_INTC2_IRQ
+ select ARCH_SPARSEMEM_ENABLE
+ select SYS_SUPPORTS_NUMA
config CPU_SUBTYPE_SHX3
bool "Support SH-X3 processor"
select CPU_SH4A
select CPU_SHX3
- select CPU_HAS_INTC2_IRQ
+ select ARCH_SPARSEMEM_ENABLE
+ select SYS_SUPPORTS_NUMA
+ select SYS_SUPPORTS_SMP
# SH4AL-DSP Processor Support
@@ -209,7 +202,6 @@ config CPU_SUBTYPE_SH7722
bool "Support SH7722 processor"
select CPU_SH4AL_DSP
select CPU_SHX2
- select CPU_HAS_INTC_IRQ
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_NUMA
@@ -274,7 +266,7 @@ config 32BIT
config X2TLB
bool "Enable extended TLB mode"
- depends on CPU_SHX2 && MMU && EXPERIMENTAL
+ depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
help
Selecting this option will enable the extended mode of the SH-X2
TLB. For legacy SH-X behaviour and interoperability, say N. For
@@ -307,6 +299,7 @@ config NUMA
config NODES_SHIFT
int
+ default "3" if CPU_SUBTYPE_SHX3
default "1"
depends on NEED_MULTIPLE_NODES
@@ -323,7 +316,9 @@ config ARCH_SPARSEMEM_DEFAULT
config MAX_ACTIVE_REGIONS
int
- default "2" if (CPU_SUBTYPE_SH7722 && SPARSEMEM)
+ default "6" if (CPU_SUBTYPE_SHX3 && SPARSEMEM)
+ default "2" if SPARSEMEM && (CPU_SUBTYPE_SH7722 || \
+ CPU_SUBTYPE_SH7785)
default "1"
config ARCH_POPULATES_NODE_MAP
@@ -342,25 +337,27 @@ config ARCH_MEMORY_PROBE
choice
prompt "Kernel page size"
+ default PAGE_SIZE_8KB if X2TLB
default PAGE_SIZE_4KB
config PAGE_SIZE_4KB
bool "4kB"
+ depends on !X2TLB
help
This is the default page size used by all SuperH CPUs.
config PAGE_SIZE_8KB
bool "8kB"
- depends on EXPERIMENTAL && X2TLB
+ depends on X2TLB
help
This enables 8kB pages as supported by SH-X2 and later MMUs.
config PAGE_SIZE_64KB
bool "64kB"
- depends on EXPERIMENTAL && CPU_SH4
+ depends on CPU_SH4
help
This enables support for 64kB pages, possible on all SH-4
- CPUs and later. Highly experimental, not recommended.
+ CPUs and later.
endchoice
@@ -412,8 +409,17 @@ config SH_DIRECT_MAPPED
Turn this option off for platforms that do not have a direct-mapped
cache, and you have no need to run the caches in such a configuration.
-config SH_WRITETHROUGH
- bool "Use write-through caching"
+choice
+ prompt "Cache mode"
+ default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
+ default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
+
+config CACHE_WRITEBACK
+ bool "Write-back"
+ depends on CPU_SH2A || CPU_SH3 || CPU_SH4
+
+config CACHE_WRITETHROUGH
+ bool "Write-through"
help
Selecting this option will configure the caches in write-through
mode, as opposed to the default write-back configuration.
@@ -424,4 +430,9 @@ config SH_WRITETHROUGH
If unsure, say N.
+config CACHE_OFF
+ bool "Off"
+
+endchoice
+
endmenu
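
The net effect on Kconfig is to retire the single SH_WRITETHROUGH bool in
favour of a three-way cache-mode choice (write-back, write-through, off).
A standalone C sketch of how consumers are expected to key off the new
symbols; the helper and the flag values are illustrative assumptions, and
only the CONFIG_* names and _PAGE_WT come from this patch:

	#define _PAGE_WT	0x001	/* placeholder value, not the real bit */
	#define _PAGE_CACHABLE	0x020	/* placeholder value, not the real bit */

	static inline unsigned long apply_cache_mode(unsigned long pteval)
	{
	#if defined(CONFIG_CACHE_OFF)
		pteval &= ~_PAGE_CACHABLE;	/* run the mapping uncached */
	#elif defined(CONFIG_CACHE_WRITETHROUGH)
		pteval |= _PAGE_WT;		/* force write-through */
	#endif
		return pteval;			/* write-back: leave PTE alone */
	}
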
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 4061e89d84d..ee30fb44dfe 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -4,29 +4,32 @@
obj-y := init.o extable.o consistent.o
-obj-$(CONFIG_CPU_SH2) += cache-sh2.o
-obj-$(CONFIG_CPU_SH3) += cache-sh3.o
-obj-$(CONFIG_CPU_SH4) += cache-sh4.o
+ifndef CONFIG_CACHE_OFF
+obj-$(CONFIG_CPU_SH2) += cache-sh2.o
+obj-$(CONFIG_CPU_SH3) += cache-sh3.o
+obj-$(CONFIG_CPU_SH4) += cache-sh4.o
+obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
+endif
mmu-y := tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_CPU_SH3) += fault-nommu.o
-mmu-$(CONFIG_CPU_SH4) += fault-nommu.o
mmu-$(CONFIG_MMU) := fault.o clear_page.o copy_page.o tlb-flush.o \
ioremap.o
obj-y += $(mmu-y)
ifdef CONFIG_DEBUG_FS
-obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
+obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
endif
ifdef CONFIG_MMU
-obj-$(CONFIG_CPU_SH3) += tlb-sh3.o
-obj-$(CONFIG_CPU_SH4) += tlb-sh4.o pg-sh4.o
-obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
+obj-$(CONFIG_CPU_SH3) += tlb-sh3.o
+obj-$(CONFIG_CPU_SH4) += tlb-sh4.o
+ifndef CONFIG_CACHE_OFF
+obj-$(CONFIG_CPU_SH4) += pg-sh4.o
+obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
+endif
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
obj-$(CONFIG_32BIT) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 86486326ef1..226b190c5b9 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@
* arch/sh/mm/cache-sh4.c
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2001 - 2006 Paul Mundt
+ * Copyright (C) 2001 - 2007 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -44,7 +44,7 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
static void compute_alias(struct cache_info *c)
{
c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
- c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
+ c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
static void __init emit_cache_params(void)
@@ -54,21 +54,35 @@ static void __init emit_cache_params(void)
ctrl_inl(CCN_CVR),
ctrl_inl(CCN_PRR));
printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
- current_cpu_data.icache.ways,
- current_cpu_data.icache.sets,
- current_cpu_data.icache.way_incr);
+ boot_cpu_data.icache.ways,
+ boot_cpu_data.icache.sets,
+ boot_cpu_data.icache.way_incr);
printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
- current_cpu_data.icache.entry_mask,
- current_cpu_data.icache.alias_mask,
- current_cpu_data.icache.n_aliases);
+ boot_cpu_data.icache.entry_mask,
+ boot_cpu_data.icache.alias_mask,
+ boot_cpu_data.icache.n_aliases);
printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
- current_cpu_data.dcache.ways,
- current_cpu_data.dcache.sets,
- current_cpu_data.dcache.way_incr);
+ boot_cpu_data.dcache.ways,
+ boot_cpu_data.dcache.sets,
+ boot_cpu_data.dcache.way_incr);
printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
- current_cpu_data.dcache.entry_mask,
- current_cpu_data.dcache.alias_mask,
- current_cpu_data.dcache.n_aliases);
+ boot_cpu_data.dcache.entry_mask,
+ boot_cpu_data.dcache.alias_mask,
+ boot_cpu_data.dcache.n_aliases);
+
+ /*
+ * Emit Secondary Cache parameters if the CPU has a probed L2.
+ */
+ if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
+ printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+ boot_cpu_data.scache.ways,
+ boot_cpu_data.scache.sets,
+ boot_cpu_data.scache.way_incr);
+ printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+ boot_cpu_data.scache.entry_mask,
+ boot_cpu_data.scache.alias_mask,
+ boot_cpu_data.scache.n_aliases);
+ }
if (!__flush_dcache_segment_fn)
panic("unknown number of cache ways\n");
@@ -79,10 +93,11 @@ static void __init emit_cache_params(void)
*/
void __init p3_cache_init(void)
{
- compute_alias(&current_cpu_data.icache);
- compute_alias(&current_cpu_data.dcache);
+ compute_alias(&boot_cpu_data.icache);
+ compute_alias(&boot_cpu_data.dcache);
+ compute_alias(&boot_cpu_data.scache);
- switch (current_cpu_data.dcache.ways) {
+ switch (boot_cpu_data.dcache.ways) {
case 1:
__flush_dcache_segment_fn = __flush_dcache_segment_1way;
break;
@@ -187,13 +202,13 @@ void flush_cache_sigtramp(unsigned long addr)
: "m" (__m(v)));
index = CACHE_IC_ADDRESS_ARRAY |
- (v & current_cpu_data.icache.entry_mask);
+ (v & boot_cpu_data.icache.entry_mask);
local_irq_save(flags);
jump_to_P2();
- for (i = 0; i < current_cpu_data.icache.ways;
- i++, index += current_cpu_data.icache.way_incr)
+ for (i = 0; i < boot_cpu_data.icache.ways;
+ i++, index += boot_cpu_data.icache.way_incr)
ctrl_outl(0, index); /* Clear out Valid-bit */
back_to_P1();
@@ -210,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
* All types of SH-4 require PC to be in P2 to operate on the I-cache.
* Some types of SH-4 require PC to be in P2 to operate on the D-cache.
*/
- if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
+ if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
(start < CACHE_OC_ADDRESS_ARRAY))
exec_offset = 0x20000000;
@@ -232,7 +247,7 @@ void flush_dcache_page(struct page *page)
int i, n;
/* Loop all the D-cache */
- n = current_cpu_data.dcache.n_aliases;
+ n = boot_cpu_data.dcache.n_aliases;
for (i = 0; i < n; i++, addr += 4096)
flush_cache_4096(addr, phys);
}
@@ -264,7 +279,7 @@ static inline void flush_icache_all(void)
void flush_dcache_all(void)
{
- (*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
+ (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
wmb();
}
@@ -278,8 +293,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
unsigned long d = 0, p = start & PAGE_MASK;
- unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
- unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
+ unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
+ unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
unsigned long select_bit;
unsigned long all_aliases_mask;
unsigned long addr_offset;
@@ -366,7 +381,7 @@ void flush_cache_mm(struct mm_struct *mm)
* If cache is only 4k-per-way, there are never any 'aliases'. Since
* the cache is physically tagged, the data can just be left in there.
*/
- if (current_cpu_data.dcache.n_aliases == 0)
+ if (boot_cpu_data.dcache.n_aliases == 0)
return;
/*
@@ -403,7 +418,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
unsigned long phys = pfn << PAGE_SHIFT;
unsigned int alias_mask;
- alias_mask = current_cpu_data.dcache.alias_mask;
+ alias_mask = boot_cpu_data.dcache.alias_mask;
/* We only need to flush D-cache when we have alias */
if ((address^phys) & alias_mask) {
@@ -417,7 +432,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
phys);
}
- alias_mask = current_cpu_data.icache.alias_mask;
+ alias_mask = boot_cpu_data.icache.alias_mask;
if (vma->vm_flags & VM_EXEC) {
/*
* Evict entries from the portion of the cache from which code
@@ -449,7 +464,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
* If cache is only 4k-per-way, there are never any 'aliases'. Since
* the cache is physically tagged, the data can just be left in there.
*/
- if (current_cpu_data.dcache.n_aliases == 0)
+ if (boot_cpu_data.dcache.n_aliases == 0)
return;
/*
@@ -510,7 +525,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
unsigned long a, ea, p;
unsigned long temp_pc;
- dcache = &current_cpu_data.dcache;
+ dcache = &boot_cpu_data.dcache;
/* Write this way for better assembly. */
way_count = dcache->ways;
way_incr = dcache->way_incr;
@@ -585,7 +600,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
base_addr = ((base_addr >> 16) << 16);
base_addr |= start;
- dcache = &current_cpu_data.dcache;
+ dcache = &boot_cpu_data.dcache;
linesz = dcache->linesz;
way_incr = dcache->way_incr;
way_size = dcache->way_size;
@@ -627,7 +642,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
base_addr = ((base_addr >> 16) << 16);
base_addr |= start;
- dcache = &current_cpu_data.dcache;
+ dcache = &boot_cpu_data.dcache;
linesz = dcache->linesz;
way_incr = dcache->way_incr;
way_size = dcache->way_size;
@@ -686,7 +701,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
base_addr = ((base_addr >> 16) << 16);
base_addr |= start;
- dcache = &current_cpu_data.dcache;
+ dcache = &boot_cpu_data.dcache;
linesz = dcache->linesz;
way_incr = dcache->way_incr;
way_size = dcache->way_size;
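
The compute_alias() change above is the key fix in this file: on a cache
with 4k (one page) per way, alias_mask works out to zero, and the old
unconditional "+ 1" claimed one alias where there are none, defeating the
n_aliases == 0 short-circuits in flush_cache_mm() and flush_cache_range().
A standalone worked example, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	static unsigned long n_aliases(unsigned long sets,
				       unsigned long entry_shift)
	{
		unsigned long alias_mask =
			((sets - 1) << entry_shift) & ~(PAGE_SIZE - 1);
		/* pre-patch: (alias_mask >> PAGE_SHIFT) + 1, even for 0 */
		return alias_mask ? (alias_mask >> PAGE_SHIFT) + 1 : 0;
	}

	int main(void)
	{
		printf("%lu\n", n_aliases(512, 5));  /* 16k way: 4 aliases */
		printf("%lu\n", n_aliases(128, 5));  /* 4k way:  0 aliases */
		return 0;
	}
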
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index ae039f2da16..a81dbdb0559 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -141,47 +141,38 @@ ENTRY(__copy_user_page)
.long 9999b, 6000f ; \
.previous
ENTRY(__copy_user)
- tst r6,r6 ! Check explicitly for zero
- bf 1f
- rts
- mov #0,r0 ! normal return
-1:
- mov.l r10,@-r15
- mov.l r9,@-r15
- mov.l r8,@-r15
+ ! Check if small number of bytes
+ mov #11,r0
mov r4,r3
- add r6,r3 ! last destination address
- mov #12,r0 ! Check if small number of bytes
- cmp/gt r0,r6
- bt 2f
- bra .L_cleanup_loop
- nop
-2:
- neg r5,r0 ! Calculate bytes needed to align source
+ cmp/gt r0,r6 ! r6 (len) > r0 (11)
+ bf/s .L_cleanup_loop_no_pop
+ add r6,r3 ! last destination address
+
+ ! Calculate bytes needed to align to src
+ mov.l r11,@-r15
+ neg r5,r0
+ mov.l r10,@-r15
add #4,r0
+ mov.l r9,@-r15
and #3,r0
+ mov.l r8,@-r15
tst r0,r0
- bt .L_jump
- mov r0,r1
+ bt 2f
-.L_loop1:
- ! Copy bytes to align source
-EX( mov.b @r5+,r0 )
- dt r1
-EX( mov.b r0,@r4 )
+1:
+ ! Copy bytes to long word align src
+EX( mov.b @r5+,r1 )
+ dt r0
add #-1,r6
- bf/s .L_loop1
+EX( mov.b r1,@r4 )
+ bf/s 1b
add #1,r4
-.L_jump:
- mov r6,r2 ! Calculate number of longwords to copy
+ ! Jump to appropriate routine depending on dest
+2: mov #3,r1
+ mov r6, r2
+ and r4,r1
shlr2 r2
- tst r2,r2
- bt .L_cleanup
-
- mov r4,r0 ! Jump to appropriate routine
- and #3,r0
- mov r0,r1
shll2 r1
mova .L_jump_tbl,r0
mov.l @(r0,r1),r1
@@ -195,43 +186,97 @@ EX( mov.b r0,@r4 )
.long .L_dest10
.long .L_dest11
+/*
+ * Come here if there are less than 12 bytes to copy
+ *
+ * Keep the branch target close, so the bf/s callee doesn't overflow
+ * and result in a more expensive branch being inserted. This is the
+ * fast-path for small copies, the jump via the jump table will hit the
+ * default slow-path cleanup. -PFM.
+ */
+.L_cleanup_loop_no_pop:
+ tst r6,r6 ! Check explicitly for zero
+ bt 1f
+
+2:
+EX( mov.b @r5+,r0 )
+ dt r6
+EX( mov.b r0,@r4 )
+ bf/s 2b
+ add #1,r4
+
+1: mov #0,r0 ! normal return
+5000:
+
+# Exception handler:
+.section .fixup, "ax"
+6000:
+ mov.l 8000f,r1
+ mov r3,r0
+ jmp @r1
+ sub r4,r0
+ .align 2
+8000: .long 5000b
+
+.previous
+ rts
+ nop
+
! Destination = 00
.L_dest00:
- mov r2,r7
- shlr2 r7
- shlr r7
- tst r7,r7
- mov #7,r0
- bt/s 1f
- and r0,r2
- .align 2
+ ! Skip the large copy for small transfers
+ mov #(32+32-4), r0
+ cmp/gt r6, r0 ! r0 (60) > r6 (len)
+ bt 1f
+
+ ! Align dest to a 32 byte boundary
+ neg r4,r0
+ add #0x20, r0
+ and #0x1f, r0
+ tst r0, r0
+ bt 2f
+
+ sub r0, r6
+ shlr2 r0
+3:
+EX( mov.l @r5+,r1 )
+ dt r0
+EX( mov.l r1,@r4 )
+ bf/s 3b
+ add #4,r4
+
2:
EX( mov.l @r5+,r0 )
+EX( mov.l @r5+,r1 )
+EX( mov.l @r5+,r2 )
+EX( mov.l @r5+,r7 )
EX( mov.l @r5+,r8 )
EX( mov.l @r5+,r9 )
EX( mov.l @r5+,r10 )
-EX( mov.l r0,@r4 )
-EX( mov.l r8,@(4,r4) )
-EX( mov.l r9,@(8,r4) )
-EX( mov.l r10,@(12,r4) )
-EX( mov.l @r5+,r0 )
-EX( mov.l @r5+,r8 )
-EX( mov.l @r5+,r9 )
-EX( mov.l @r5+,r10 )
- dt r7
-EX( mov.l r0,@(16,r4) )
-EX( mov.l r8,@(20,r4) )
-EX( mov.l r9,@(24,r4) )
-EX( mov.l r10,@(28,r4) )
+EX( mov.l @r5+,r11 )
+EX( movca.l r0,@r4 )
+ add #-32, r6
+EX( mov.l r1,@(4,r4) )
+ mov #32, r0
+EX( mov.l r2,@(8,r4) )
+ cmp/gt r6, r0 ! r0 (32) > r6 (len)
+EX( mov.l r7,@(12,r4) )
+EX( mov.l r8,@(16,r4) )
+EX( mov.l r9,@(20,r4) )
+EX( mov.l r10,@(24,r4) )
+EX( mov.l r11,@(28,r4) )
bf/s 2b
add #32,r4
- tst r2,r2
+
+1: mov r6, r0
+ shlr2 r0
+ tst r0, r0
bt .L_cleanup
1:
-EX( mov.l @r5+,r0 )
- dt r2
-EX( mov.l r0,@r4 )
+EX( mov.l @r5+,r1 )
+ dt r0
+EX( mov.l r1,@r4 )
bf/s 1b
add #4,r4
@@ -250,7 +295,7 @@ EX( mov.l r0,@r4 )
and r0,r2
2:
dt r7
-#ifdef __LITTLE_ENDIAN__
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.l @r5+,r0 )
EX( mov.l @r5+,r1 )
EX( mov.l @r5+,r8 )
@@ -320,7 +365,7 @@ EX( mov.w r0,@(2,r4) )
1: ! Read longword, write two words per iteration
EX( mov.l @r5+,r0 )
dt r2
-#ifdef __LITTLE_ENDIAN__
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.w r0,@r4 )
shlr16 r0
EX( mov.w r0,@(2,r4) )
@@ -342,7 +387,7 @@ EX( mov.w r0,@r4 )
! Read longword, write byte, word, byte per iteration
EX( mov.l @r5+,r0 )
dt r2
-#ifdef __LITTLE_ENDIAN__
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
EX( mov.b r0,@r4 )
shlr8 r0
add #1,r4
@@ -379,6 +424,7 @@ EX( mov.b r0,@r4 )
.L_exit:
mov #0,r0 ! normal return
+
5000:
# Exception handler:
@@ -394,5 +440,6 @@ EX( mov.b r0,@r4 )
.previous
mov.l @r15+,r8
mov.l @r15+,r9
+ mov.l @r15+,r10
rts
- mov.l @r15+,r10
+ mov.l @r15+,r11
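
In C terms, the reworked __copy_user flows roughly as below. This is a
sketch under stated simplifications: the real assembly also dispatches on
destination alignment through the jump table, uses movca.l to allocate
cache lines for aligned 32-byte stores, and returns the count of uncopied
bytes from the .fixup handler when a user access faults.

	#include <stddef.h>
	#include <stdint.h>

	static size_t copy_user_sketch(char *to, const char *from, size_t n)
	{
		if (n <= 11) {			/* fast path, no reg saves */
			while (n--)
				*to++ = *from++;
			return 0;		/* 0: everything copied */
		}

		while ((uintptr_t)from & 3) {	/* long-word align source */
			*to++ = *from++;
			n--;
		}

		/* .L_dest00 case: destination word-aligned too, so move
		 * eight long words (32 bytes) per iteration. */
		if (((uintptr_t)to & 3) == 0) {
			while (n >= 32) {
				const uint32_t *s = (const uint32_t *)from;
				uint32_t *d = (uint32_t *)to;

				d[0] = s[0]; d[1] = s[1];
				d[2] = s[2]; d[3] = s[3];
				d[4] = s[4]; d[5] = s[5];
				d[6] = s[6]; d[7] = s[7];
				from += 32; to += 32; n -= 32;
			}
		}

		while (n--)			/* byte-at-a-time tail */
			*to++ = *from++;
		return 0;
	}
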
diff --git a/arch/sh/mm/fault-nommu.c b/arch/sh/mm/fault-nommu.c
deleted file mode 100644
index c6f5b51ec2c..00000000000
--- a/arch/sh/mm/fault-nommu.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * arch/sh/mm/fault-nommu.c
- *
- * Copyright (C) 2002 - 2007 Paul Mundt
- *
- * Based on linux/arch/sh/mm/fault.c:
- * Copyright (C) 1999 Niibe Yutaka
- *
- * Released under the terms of the GNU GPL v2.0.
- */
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/hardirq.h>
-#include <linux/kprobes.h>
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <asm/kgdb.h>
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
- unsigned long writeaccess,
- unsigned long address)
-{
- trace_hardirqs_on();
- local_irq_enable();
-
-#if defined(CONFIG_SH_KGDB)
- if (kgdb_nofault && kgdb_bus_err_hook)
- kgdb_bus_err_hook();
-#endif
-
- /*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- *
- */
- if (address < PAGE_SIZE) {
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- } else {
- printk(KERN_ALERT "Unable to handle kernel paging request");
- }
-
- printk(" at virtual address %08lx\n", address);
- printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-
- die("Oops", regs, writeaccess);
- do_exit(SIGKILL);
-}
-
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
- unsigned long writeaccess,
- unsigned long address)
-{
-#if defined(CONFIG_SH_KGDB)
- if (kgdb_nofault && kgdb_bus_err_hook)
- kgdb_bus_err_hook();
-#endif
-
- return (address >= TASK_SIZE);
-}
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 04a39aa7f1f..4729668ce5b 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -214,7 +214,7 @@ out_of_memory:
}
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
- do_exit(SIGKILL);
+ do_group_exit(SIGKILL);
goto no_context;
do_sigbus:
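
The fault.c hunk is a semantic fix rather than a cleanup: in the
out-of-memory path, do_exit(SIGKILL) tears down only the faulting thread,
leaving its siblings in the thread group running. do_group_exit(SIGKILL)
delivers the kill to the whole group, which is what an OOM kill of a user
process is meant to do.
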
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 82b68c789a5..d5e160da64b 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -294,12 +294,6 @@ int arch_add_memory(int nid, u64 start, u64 size)
}
EXPORT_SYMBOL_GPL(arch_add_memory);
-int remove_memory(u64 start, u64 size)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(remove_memory);
-
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index a08a4a958ad..7d43758dc24 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -145,7 +145,7 @@ repeat:
ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
/*
* When we are in 32-bit address extended mode, CCR.CB becomes
* invalid, so care must be taken to manually adjust cacheable
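
For context on the pmb.c hunk: in 32-bit address-extended mode CCR.CB is
ignored, so a global write-through policy has to be stamped into each PMB
data entry instead. A sketch of that adjustment; the PMB_C/PMB_WT flag
names follow the PMB_V usage visible above but should be treated as
assumptions here:

	static unsigned long pmb_data_flags(unsigned long flags)
	{
	#ifdef CONFIG_CACHE_WRITETHROUGH
		if (flags & PMB_C)		/* cacheable entry? */
			flags |= PMB_WT;	/* then force write-through */
	#endif
		return flags;
	}
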
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f74cf667c8f..2d1dd604430 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -4,27 +4,14 @@
* SH-4 specific TLB operations
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/signal.h>
-#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -34,22 +21,27 @@ void update_mmu_cache(struct vm_area_struct * vma,
unsigned long flags;
unsigned long pteval;
unsigned long vpn;
- struct page *page;
- unsigned long pfn;
/* Ptrace may call this routine. */
if (vma && current->active_mm != vma->vm_mm)
return;
- pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (!test_bit(PG_mapped, &page->flags)) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
- __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
- __set_bit(PG_mapped, &page->flags);
+#ifndef CONFIG_CACHE_OFF
+ {
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+
+ if (!test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+ __flush_wback_region((void *)P1SEGADDR(phys),
+ PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
}
}
+#endif
local_irq_save(flags);
@@ -57,16 +49,26 @@ void update_mmu_cache(struct vm_area_struct * vma,
vpn = (address & MMU_VPN_MASK) | get_asid();
ctrl_outl(vpn, MMU_PTEH);
- pteval = pte_val(pte);
+ pteval = pte.pte_low;
/* Set PTEA register */
+#ifdef CONFIG_X2TLB
+ /*
+ * For the extended mode TLB this is trivial, only the ESZ and
+ * EPR bits need to be written out to PTEA, with the remainder of
+ * the protection bits (with the exception of the compat-mode SZ
+ * and PR bits, which are cleared) being written out in PTEL.
+ */
+ ctrl_outl(pte.pte_high, MMU_PTEA);
+#else
if (cpu_data->flags & CPU_HAS_PTEA)
/* TODO: make this look less hacky */
ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
/* Set PTEL register */
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
+#ifdef CONFIG_CACHE_WRITETHROUGH
pteval |= _PAGE_WT;
#endif
/* conveniently, we want all the software flags to be 0 anyway */
@@ -93,4 +95,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
ctrl_outl(data, addr);
back_to_P1();
}
-
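
Finally, the legacy (non-X2TLB) PTEA packing that tlb-sh4.c keeps is terse
enough to deserve a worked example: it routes pteval bits 31:29 into
PTEA[3:1] and bit 0 into PTEA[0]. On X2TLB parts this whole dance goes
away, since pte.pte_high is written to MMU_PTEA directly.

	#include <stdio.h>

	int main(void)
	{
		unsigned long pteval = 0xa0000001UL;	/* example PTE */
		unsigned long ptea =
			((pteval >> 28) & 0xe) | (pteval & 0x1);

		printf("PTEA = 0x%lx\n", ptea);		/* PTEA = 0xb */
		return 0;
	}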