Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/kernel/sys_sh.c |   2
-rw-r--r--  arch/sh/mm/Makefile     |   2
-rw-r--r--  arch/sh/mm/consistent.c |   2
-rw-r--r--  arch/sh/mm/fault.c      | 202
-rw-r--r--  arch/sh/mm/init.c       |  13
-rw-r--r--  arch/sh/mm/tlb-flush.c  | 132
6 files changed, 188 insertions(+), 165 deletions(-)
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f26..d8bcd8a2232 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
-
+#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index f4e32b3d24d..d90906367c5 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_DMA_PAGE_OPS) += pg-dma.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
mmu-y := fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU) := fault.o clear_page.o copy_page.o
+mmu-$(CONFIG_MMU) := fault.o clear_page.o copy_page.o tlb-flush.o
obj-y += $(mmu-y)
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index ee73e30263a..c81e6b67ad3 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -9,6 +9,8 @@
*/
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/addrspace.h>
#include <asm/io.h>
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
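The two new includes in consistent.c pull in the cache flushing helpers and the P1/P2 segment constants. On 29-bit SH, P1 (0x80000000) is a cached identity window onto physical memory and P2 (0xa0000000) is an uncached window onto the same memory, so a DMA-coherent allocation can hand out an uncached alias by address arithmetic alone. A minimal userspace sketch of that arithmetic, assuming the standard segment bases (the helper name is illustrative, not a kernel API):

#include <stdio.h>

#define P1SEG 0x80000000UL	/* cached identity mapping of RAM */
#define P2SEG 0xa0000000UL	/* uncached identity mapping of the same RAM */

/* Illustrative helper: turn a cached P1 address into its uncached P2
 * alias. Only the segment bits change; the 29-bit physical offset is
 * preserved. */
static unsigned long p1_to_p2(unsigned long cached)
{
	return cached - P1SEG + P2SEG;
}

int main(void)
{
	/* 0x80100000 (cached) aliases the same RAM as 0xa0100000. */
	printf("%#lx\n", p1_to_p2(0x80100000UL));
	return 0;
}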
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 364181f27b7..7a03ffe6dad 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -1,33 +1,20 @@
-/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
+/*
+ * Page fault handler for SH with an MMU.
*
- * linux/arch/sh/mm/fault.c
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2003 Paul Mundt
*
* Based on linux/arch/i386/mm/fault.c:
* Copyright (C) 1995 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
-
-#include <linux/signal.h>
-#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
#include <asm/kgdb.h>
extern void die(const char *,struct pt_regs *,long);
@@ -187,14 +174,25 @@ do_sigbus:
goto no_context;
}
+#ifdef CONFIG_SH_STORE_QUEUES
/*
- * Called with interrupt disabled.
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
*/
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
unsigned long address)
{
- unsigned long addrmax = P4SEG;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
@@ -207,31 +205,36 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
kgdb_bus_err_hook();
#endif
-#ifdef CONFIG_SH_STORE_QUEUES
- addrmax = P4SEG_STORE_QUE + 0x04000000;
-#endif
-
- if (address >= P3SEG && address < addrmax) {
+ /*
+ * We don't take page faults for P1, P2, and parts of P4; these
+ * are always mapped, whether due to legacy behaviour in 29-bit
+ * mode or to PMB configuration in 32-bit mode.
+ */
+ if (address >= P3SEG && address < P3_ADDR_MAX)
pgd = pgd_offset_k(address);
- mm = NULL;
- } else if (address >= TASK_SIZE)
- return 1;
- else if (!(mm = current->mm))
- return 1;
- else
- pgd = pgd_offset(mm, address);
+ else {
+ if (unlikely(address >= TASK_SIZE || !current->mm))
+ return 1;
+
+ pgd = pgd_offset(current->mm, address);
+ }
- pmd = pmd_offset(pgd, address);
+ pud = pud_offset(pgd, address);
+ if (pud_none_or_clear_bad(pud))
+ return 1;
+ pmd = pmd_offset(pud, address);
if (pmd_none_or_clear_bad(pmd))
return 1;
+
if (mm)
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
else
pte = pte_offset_kernel(pmd, address);
entry = *pte;
- if (pte_none(entry) || pte_not_present(entry)
- || (writeaccess && !pte_write(entry)))
+ if (unlikely(pte_none(entry) || pte_not_present(entry)))
+ goto unlock;
+ if (unlikely(writeaccess && !pte_write(entry)))
goto unlock;
if (writeaccess)
@@ -243,13 +246,7 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
* ITLB is not affected by "ldtlb" instruction.
* So, we need to flush the entry by ourselves.
*/
-
- {
- unsigned long flags;
- local_irq_save(flags);
- __flush_tlb_page(get_asid(), address&PAGE_MASK);
- local_irq_restore(flags);
- }
+ __flush_tlb_page(get_asid(), address & PAGE_MASK);
#endif
set_pte(pte, entry);
@@ -260,122 +257,3 @@ unlock:
pte_unmap_unlock(pte, ptl);
return ret;
}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
- unsigned long flags;
- unsigned long asid;
- unsigned long saved_asid = MMU_NO_ASID;
-
- asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
- page &= PAGE_MASK;
-
- local_irq_save(flags);
- if (vma->vm_mm != current->mm) {
- saved_asid = get_asid();
- set_asid(asid);
- }
- __flush_tlb_page(asid, page);
- if (saved_asid != MMU_NO_ASID)
- set_asid(saved_asid);
- local_irq_restore(flags);
- }
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- if (mm->context != NO_CONTEXT) {
- unsigned long flags;
- int size;
-
- local_irq_save(flags);
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
- mm->context = NO_CONTEXT;
- if (mm == current->mm)
- activate_context(mm);
- } else {
- unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
- unsigned long saved_asid = MMU_NO_ASID;
-
- start &= PAGE_MASK;
- end += (PAGE_SIZE - 1);
- end &= PAGE_MASK;
- if (mm != current->mm) {
- saved_asid = get_asid();
- set_asid(asid);
- }
- while (start < end) {
- __flush_tlb_page(asid, start);
- start += PAGE_SIZE;
- }
- if (saved_asid != MMU_NO_ASID)
- set_asid(saved_asid);
- }
- local_irq_restore(flags);
- }
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- unsigned long flags;
- int size;
-
- local_irq_save(flags);
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
- flush_tlb_all();
- } else {
- unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
- unsigned long saved_asid = get_asid();
-
- start &= PAGE_MASK;
- end += (PAGE_SIZE - 1);
- end &= PAGE_MASK;
- set_asid(asid);
- while (start < end) {
- __flush_tlb_page(asid, start);
- start += PAGE_SIZE;
- }
- set_asid(saved_asid);
- }
- local_irq_restore(flags);
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
- /* Invalidate all TLB of this process. */
- /* Instead of invalidating each TLB, we get new MMU context. */
- if (mm->context != NO_CONTEXT) {
- unsigned long flags;
-
- local_irq_save(flags);
- mm->context = NO_CONTEXT;
- if (mm == current->mm)
- activate_context(mm);
- local_irq_restore(flags);
- }
-}
-
-void flush_tlb_all(void)
-{
- unsigned long flags, status;
-
- /*
- * Flush all the TLB.
- *
- * Write to the MMU control register's bit:
- * TF-bit for SH-3, TI-bit for SH-4.
- * It's same position, bit #2.
- */
- local_irq_save(flags);
- status = ctrl_inl(MMUCR);
- status |= 0x04;
- ctrl_outl(status, MMUCR);
- ctrl_barrier();
- local_irq_restore(flags);
-}
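The rework above also changes how __do_page_fault picks a set of page tables: P3 addresses (and, when CONFIG_SH_STORE_QUEUES is set, the 64MB store-queue window above P4SEG) are resolved against the kernel page tables via pgd_offset_k(), while anything below TASK_SIZE is resolved against the faulting task's mm. A compilable sketch of just that dispatch test follows; the segment bases match <asm/addrspace.h>, but the config stand-in and function name are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define P3SEG		0xc0000000UL	/* kernel mapped (vmalloc) space */
#define P4SEG		0xe0000000UL	/* control space */
#define P4SEG_STORE_QUE	0xe4000000UL	/* SH-4 store queue space */

#define SH_STORE_QUEUES	1		/* stand-in for CONFIG_SH_STORE_QUEUES */

#if SH_STORE_QUEUES
/* Store-queue pages must be faulted in before the SQ contents can be
 * flushed out to the remapped region, so the kernel walk also covers
 * the 64MB SQ window. */
#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000UL)
#else
#define P3_ADDR_MAX	P4SEG
#endif

/* true: walk the kernel page tables (pgd_offset_k());
 * false: walk the current task's page tables (pgd_offset()). */
static bool fault_in_kernel_space(unsigned long address)
{
	return address >= P3SEG && address < P3_ADDR_MAX;
}

int main(void)
{
	printf("P3 address:   %d\n", fault_in_kernel_space(0xc0001000UL));
	printf("SQ address:   %d\n", fault_in_kernel_space(0xe4000000UL));
	printf("user address: %d\n", fault_in_kernel_space(0x00400000UL));
	return 0;
}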
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8ea27ca4b70..d1a979eab65 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -80,6 +80,7 @@ void show_mem(void)
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -89,7 +90,17 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
return;
}
- pmd = pmd_offset(pgd, addr);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud)) {
+ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+ if (pmd != pmd_offset(pud, 0)) {
+ pud_ERROR(*pud);
+ return;
+ }
+ }
+
+ pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
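The extra pud step in set_pte_phys() (and in __do_page_fault above) exists to satisfy the generic four-level page table API; SH folds the pud level into the pgd, so pud_offset() is effectively a type cast and the added call has no runtime cost. A toy model of a folded level, in the spirit of asm-generic's pgtable-nopud.h (types and names are simplified stand-ins):

#include <stdio.h>

/* With a folded pud, each pgd entry *is* the sole pud entry below it,
 * so "offsetting" into the pud table is just a reinterpretation of the
 * pgd pointer; no extra table exists and no memory is dereferenced. */
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { pgd_t pgd; } pud_t;

static pud_t *toy_pud_offset(pgd_t *pgd, unsigned long address)
{
	(void)address;		/* exactly one pud per pgd entry */
	return (pud_t *)pgd;
}

int main(void)
{
	pgd_t entry = { 0x12345000UL };
	pud_t *pud = toy_pud_offset(&entry, 0);

	/* Same address, same bits: the fold is purely a type-level view. */
	printf("%p %p\n", (void *)&entry, (void *)pud);
	return 0;
}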
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
new file mode 100644
index 00000000000..fd7e42bcaa4
--- /dev/null
+++ b/arch/sh/mm/tlb-flush.c
@@ -0,0 +1,132 @@
+/*
+ * TLB flushing operations for SH with an MMU.
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+ unsigned long flags;
+ unsigned long asid;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+ page &= PAGE_MASK;
+
+ local_irq_save(flags);
+ if (vma->vm_mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+ __flush_tlb_page(asid, page);
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ } else {
+ unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+ unsigned long saved_asid = MMU_NO_ASID;
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ if (mm != current->mm) {
+ saved_asid = get_asid();
+ set_asid(asid);
+ }
+ while (start < end) {
+ __flush_tlb_page(asid, start);
+ start += PAGE_SIZE;
+ }
+ if (saved_asid != MMU_NO_ASID)
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+ int size;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+ flush_tlb_all();
+ } else {
+ unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+ unsigned long saved_asid = get_asid();
+
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ set_asid(asid);
+ while (start < end) {
+ __flush_tlb_page(asid, start);
+ start += PAGE_SIZE;
+ }
+ set_asid(saved_asid);
+ }
+ local_irq_restore(flags);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ /* Invalidate all TLB entries for this process by getting a new */
+ /* MMU context, rather than flushing each entry individually. */
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mm->context = NO_CONTEXT;
+ if (mm == current->mm)
+ activate_context(mm);
+ local_irq_restore(flags);
+ }
+}
+
+void flush_tlb_all(void)
+{
+ unsigned long flags, status;
+
+ /*
+ * Flush the entire TLB.
+ *
+ * This is done by setting the flush bit in the MMU control
+ * register: the TF bit on SH-3, the TI bit on SH-4. Both
+ * live at the same position, bit #2.
+ */
+ local_irq_save(flags);
+ status = ctrl_inl(MMUCR);
+ status |= 0x04;
+ ctrl_outl(status, MMUCR);
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
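Worth noting about the flushing policy carried over into tlb-flush.c: flush_tlb_range() and flush_tlb_kernel_range() only flush page by page while the range covers at most a quarter of the TLB; larger ranges are invalidated wholesale (by assigning a fresh MMU context, or via flush_tlb_all()). A standalone sketch of the cutoff arithmetic, assuming MMU_NTLB_ENTRIES = 64 (the SH-4 uTLB size):

#include <stdio.h>

#define PAGE_SHIFT	 12
#define PAGE_SIZE	 (1UL << PAGE_SHIFT)
#define MMU_NTLB_ENTRIES 64	/* assumed: SH-4 has a 64-entry uTLB */

/* Mirror of the size test above: count the pages the range spans and
 * compare against a quarter of the TLB capacity. */
static const char *flush_strategy(unsigned long start, unsigned long end)
{
	unsigned long pages = (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return pages > MMU_NTLB_ENTRIES / 4 ? "wholesale invalidate"
					    : "per-page flush";
}

int main(void)
{
	/* 16 pages stay per-page; 256 pages tip over to wholesale. */
	printf("%s\n", flush_strategy(0x00400000UL, 0x00410000UL));
	printf("%s\n", flush_strategy(0x00400000UL, 0x00500000UL));
	return 0;
}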