From 9d4436a6fbc8c5eccdfcb8f5884e0a7b4a57f6d2 Mon Sep 17 00:00:00 2001
From: Yoshinori Sato
Date: Sun, 5 Nov 2006 15:40:13 +0900
Subject: sh: Add support for SH7206 and SH7619 CPU subtypes.

This implements initial support for the SH7206 (SH-2A) and SH7619
(SH-2) MMU-less CPUs.

Signed-off-by: Yoshinori Sato
Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig     | 17 +++++++++++--
 arch/sh/mm/cache-sh2.c | 69 +++++++++++++++++++++++++++-----------------------
 2 files changed, 53 insertions(+), 33 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd606464d2..814a1758697 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@ menu "Processor selection"
 # Processor families
 #
 config CPU_SH2
+	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
-	select SH_WRITETHROUGH
+
+config CPU_SH2A
+	bool
+	select CPU_SH2
 
 config CPU_SH3
 	bool
@@ -40,6 +44,16 @@ config CPU_SUBTYPE_SH7604
 	bool "Support SH7604 processor"
 	select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+	bool "Support SH7619 processor"
+	select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+	bool "Support SH7206 processor"
+	select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -274,7 +288,6 @@ config SH_DIRECT_MAPPED
 
 config SH_WRITETHROUGH
 	bool "Use write-through caching"
-	default y if CPU_SH2
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb24ea2..6614033f6be 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
+
 #include 
 #include 
 
@@ -14,37 +15,43 @@
 #include 
 #include 
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-	unsigned long ccr;
-
-	/*
-	 * On SH-2 the way bit isn't tracked in the address field
-	 * if we're doing address array access .. instead, we need
-	 * to manually switch out the way in the CCR.
-	 */
-	ccr = ctrl_inl(CCR);
-	ccr &= ~0x00c0;
-	ccr |= way << cpu_data->dcache.way_shift;
-
-	/*
-	 * Despite the number of sets being halved, we end up losing
-	 * the first 2 ways to OCRAM instead of the last 2 (if we're
-	 * 4-way). As a result, forcibly setting the W1 bit handily
-	 * bumps us up 2 ways.
-	 */
-	if (ccr & CCR_CACHE_ORA)
-		ccr |= 1 << (cpu_data->dcache.way_shift + 1);
-
-	ctrl_outl(ccr, CCR);
-
-	return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		/* FIXME cache purge */
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_purge_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
 }
-- 
cgit v1.2.3


From b552c7e8bceae8a04ae79ecee6fa369c1ba4f8e4 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 20 Nov 2006 14:14:29 +0900
Subject: sh: Hook SH7785 in to the build system.

Simple 7785 placeholders to start hooking up other bits of code.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 814a1758697..27463e26a7b 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -171,6 +171,11 @@ config CPU_SUBTYPE_SH7780
 	select CPU_SH4A
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+	bool "Support SH7785 processor"
+	select CPU_SH4A
+	select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
-- 
cgit v1.2.3


From 21440cf04a64cd1b1209c12a6e1a3afba2a28709 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 20 Nov 2006 14:30:26 +0900
Subject: sh: Preliminary support for SH-X2 MMU.

This adds some preliminary support for the SH-X2 MMU, used by
newer SH-4A parts (particularly SH7785).

This MMU implements a 'compat' mode with SH-X MMUs and an
'extended' mode for SH-X2 extended features. Extended features
include additional page sizes (8kB, 4MB, 64MB), as well as the
addition of page execute permissions.

The extended mode attributes are placed in a second data array,
which requires us to switch to 64-bit PTEs when in X2 mode.

With the addition of the exec perms, we also overhaul the mmap
prots somewhat, now that it's possible to handle them more
intelligently.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig   | 49 +++++++++++++++++++++++++++++++++++++++++++++--
 arch/sh/mm/init.c    |  4 ++--
 arch/sh/mm/ioremap.c |  4 +---
 arch/sh/mm/pg-sh4.c  | 12 ++----------
 4 files changed, 52 insertions(+), 17 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 27463e26a7b..88e9663fc9f 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -235,13 +235,22 @@ config MEMORY_SIZE
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug
+	  reports, say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -255,17 +264,53 @@ config VSYSCALL
 	  For systems with an MMU that can afford to give up a page,
 	  (the default value) say Y.
 
+choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on all SH-4
+	  CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
 choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-       bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce978..8b275166f40 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -93,7 +93,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud)) {
 		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
+		set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
 		if (pmd != pmd_offset(pud, 0)) {
 			pud_ERROR(*pud);
 			return;
@@ -103,7 +103,7 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd)) {
 		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
+		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
 		if (pte != pte_offset_kernel(pmd, 0)) {
 			pmd_ERROR(*pmd);
 			return;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc23..11d54c14982 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
 {
 	unsigned long end;
 	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+	pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
 	address &= ~PMD_MASK;
 	end = address + size;
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a31..e973ac3b13b 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -37,10 +37,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		clear_page(to);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,7 +46,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
 		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
@@ -77,10 +73,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
 	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
 		copy_page(to, from);
 	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
 		unsigned long phys_addr = PHYSADDR(to);
 		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
 		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,7 +82,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		pte_t entry;
 		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
 		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
-- 
cgit v1.2.3


From 52e27782e1c4afa1feca0fdf194d279595e0431c Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 21 Nov 2006 11:09:41 +0900
Subject: sh: p3map_sem sem2mutex conversion.

Simple sem2mutex conversion for the p3map semaphores.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh4.c | 14 +++++---------
 arch/sh/mm/pg-sh4.c    | 23 ++++++-----------------
 2 files changed, 11 insertions(+), 26 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22724d..7e62ba071d6 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
  */
 #include 
 #include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
+#include 
+#include 
 #include 
 #include 
 
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void)
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES	16
+#define MAX_P3_MUTEXES		16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -115,7 +111,7 @@ void __init p3_cache_init(void)
 		panic("%s failed.", __FUNCTION__);
 
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-		sema_init(&p3map_sem[i], 1);
+		mutex_init(&p3map_mutex[i]);
 }
 
 /*
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index e973ac3b13b..3f98d2a4f93 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include 
-#include 
 #include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
+#include 
 #include 
 #include 
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -47,7 +37,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		unsigned long flags;
 
 		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -55,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
 		update_mmu_cache(NULL, p3_addr, entry);
 		__clear_user_page((void *)p3_addr, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -83,7 +73,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		unsigned long flags;
 
 		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 		set_pte(pte, entry);
 		local_irq_save(flags);
 		__flush_tlb_page(get_asid(), p3_addr);
@@ -91,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
 		update_mmu_cache(NULL, p3_addr, entry);
 		__copy_user_page((void *)p3_addr, from, to);
 		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
 	}
 }
 
@@ -114,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
 	}
 	return pte;
 }
-
-- 
cgit v1.2.3


From b5a1bcbee434b843c8850a968d9a6c7541f1be9d Mon Sep 17 00:00:00 2001
From: Stuart Menefy
Date: Tue, 21 Nov 2006 13:34:04 +0900
Subject: sh: Set up correct siginfo structures for page faults.

Remove the previous saving of fault codes into the thread_struct
as they are never used, and appeared to be inherited from x86.

Signed-off-by: Stuart Menefy
Signed-off-by: Paul Mundt
---
 arch/sh/mm/fault.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8f99a..43bed2cb00e 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,16 @@ extern void die(const char *,struct pt_regs *,long);
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	unsigned long page;
+	int si_code;
+	siginfo_t info;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +44,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	tsk = current;
 	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
 
 	/*
 	 * If we're in an interrupt or have no user
@@ -65,6 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	 * we can handle it..
 	 */
good_area:
+	si_code = SEGV_ACCERR;
 	if (writeaccess) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -105,9 +110,11 @@ bad_area:
 	up_read(&mm->mmap_sem);
 
 	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
 		return;
 	}
 
@@ -166,10 +173,11 @@ do_sigbus:
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
-- 
cgit v1.2.3


From 6e4662ff49c6b94e16a47bfddb920576963b5a20 Mon Sep 17 00:00:00 2001
From: Stuart Menefy
Date: Tue, 21 Nov 2006 13:53:44 +0900
Subject: sh: Use MMU.TTB register as pointer to current pgd.

Add TTB accessor functions and give it a sensible default value.
We will use this later for optimizing the fault path.

Signed-off-by: Stuart Menefy
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8b275166f40..8c8d3911838 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -155,9 +155,6 @@ extern char __init_begin, __init_end;
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
@@ -180,14 +177,11 @@ void __init paging_init(void)
 	 */
 	{
 		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanently mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
 		/* Turn on the MMU */
 		enable_mmu();
@@ -206,6 +200,10 @@ void __init paging_init(void)
 		}
 	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
 	/*
 	 * If we don't have CONFIG_MMU set and the processor in question
-- 
cgit v1.2.3


From 99a596f93be10001c50894bcab69e458a49a3b8c Mon Sep 17 00:00:00 2001
From: Stuart Menefy
Date: Tue, 21 Nov 2006 15:38:05 +0900
Subject: sh: pmd rework.

Remove extra bits from the pmd structure and store a kernel logical
address rather than a physical address. This allows it to be directly
dereferenced. Another piece of weirdness inherited from x86.

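In sketch form (an illustrative reading of the change, not code from the
patch itself), storing a kernel logical (P1) address in the pmd lets the
walker treat the stored value as a pointer directly:

	/* sketch: pmd_val() now yields a dereferenceable kernel pointer,
	 * so no phys-to-virt translation or flag masking is needed first.
	 * pte_offset_sketch is a hypothetical name; pte_index is assumed
	 * to extract the PTE slot from the faulting address. */
	static inline pte_t *pte_offset_sketch(pmd_t *pmd, unsigned long addr)
	{
		return (pte_t *)pmd_val(*pmd) + pte_index(addr);
	}
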
Signed-off-by: Stuart Menefy
Signed-off-by: Paul Mundt
---
 arch/sh/mm/fault.c | 40 ++++++++++++++++++++++++++++++++++++++++
 arch/sh/mm/init.c  | 26 +++++++++-----------------
 2 files changed, 49 insertions(+), 17 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 43bed2cb00e..128907ef7fc 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -46,6 +46,45 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	mm = tsk->mm;
 	si_code = SEGV_MAPERR;
 
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
+
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
@@ -109,6 +148,7 @@ survive:
 
 bad_area:
 	up_read(&mm->mmap_sem);
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 8c8d3911838..462bfeac6d9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pmd_t *pmd;
 	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
 		return;
 	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
 	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
-- 
cgit v1.2.3


From 9b3a53ab76771e3669e50086c131e1574fe25847 Mon Sep 17 00:00:00 2001
From: Stuart Menefy
Date: Fri, 24 Nov 2006 11:42:24 +0900
Subject: sh: TLB miss fast-path optimizations.

Handle simple TLB miss faults which can be resolved completely from
the page table in assembler.

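In outline, the control flow the assembler fast path has to implement
is the same one the C fast-path handler removed below used to provide;
a hedged C rendering of that flow, for orientation only:

	/* sketch: resolve a TLB miss purely from the page table; any
	 * case needing real fault handling is punted to the C handler */
	static int tlb_miss_fast_path(struct mm_struct *mm, unsigned long addr,
				      int writeaccess)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		pte_t *pte, entry;

		if (pmd_none(*pmd))
			return 1;	/* no PTE page: take the slow path */
		pte = pte_offset_kernel(pmd, addr);
		entry = *pte;
		if (!pte_present(entry) ||
		    (writeaccess && !pte_write(entry)))
			return 1;	/* needs the full fault handler */
		if (writeaccess)
			entry = pte_mkdirty(entry);
		set_pte(pte, pte_mkyoung(entry));
		/* ...then load the entry into the TLB (ldtlb) and return */
		return 0;
	}
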
Signed-off-by: Stuart Menefy
Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig |  1 +
 arch/sh/mm/fault.c | 86 ------------------------------------------------------
 2 files changed, 1 insertion(+), 86 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 88e9663fc9f..6cd6d0045d1 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,6 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
+	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
 	bool
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 128907ef7fc..123fb80c859 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -223,89 +223,3 @@ do_sigbus:
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
-- 
cgit v1.2.3


From 510c72ad2dd4e05e6908755f51ac89482c6eb987 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 27 Nov 2006 12:06:26 +0900
Subject: sh: Fixup various PAGE_SIZE == 4096 assumptions.

There were a number of places that made evil PAGE_SIZE == 4k
assumptions that ended up breaking when trying to play with
8k and 64k page sizes, this fixes those up.

The most significant change is the way we load THREAD_SIZE,
previously this was done via:

	mov	#(THREAD_SIZE >> 8), reg
	shll8	reg

to avoid a memory access and allow the immediate load. With a 64k
PAGE_SIZE, we're out of range for the immediate load size without
resorting to special instructions available in later ISAs
(movi20s and so on).

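(A concrete instance, with illustrative numbers rather than values taken
from the patch: for an 8 kB THREAD_SIZE, 8192 >> 8 = 32, which sits
comfortably in the signed 8-bit immediate range of mov #imm8 (-128..127),
and shll8 restores 32 << 8 = 8192. Once THREAD_SIZE scales up with a
64 kB PAGE_SIZE, the shifted constant becomes 256 or more and no longer
fits in the immediate.)
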
The "workaround" for this is to bump up the shift to 10 and insert a shll2, which gives a bit more flexibility while still being much cheaper than a memory access. Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh4.c | 4 ++-- arch/sh/mm/clear_page.S | 18 +++++++++--------- arch/sh/mm/copy_page.S | 16 ++++++++-------- arch/sh/mm/init.c | 1 - arch/sh/mm/pg-dma.c | 2 -- 5 files changed, 19 insertions(+), 22 deletions(-) (limited to 'arch/sh/mm') diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 7e62ba071d6..ae531affccb 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -225,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start, */ if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) || (start < CACHE_OC_ADDRESS_ARRAY)) - exec_offset = 0x20000000; + exec_offset = 0x20000000; local_irq_save(flags); __flush_cache_4096(start | SH_CACHE_ASSOC, @@ -246,7 +246,7 @@ void flush_dcache_page(struct page *page) /* Loop all the D-cache */ n = cpu_data->dcache.n_aliases; - for (i = 0; i < n; i++, addr += PAGE_SIZE) + for (i = 0; i < n; i++, addr += 4096) flush_cache_4096(addr, phys); } diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S index 7b96425ae27..8a706131e52 100644 --- a/arch/sh/mm/clear_page.S +++ b/arch/sh/mm/clear_page.S @@ -1,12 +1,12 @@ -/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $ - * +/* * __clear_user_page, __clear_user, clear_page implementation of SuperH * * Copyright (C) 2001 Kaz Kojima * Copyright (C) 2001, 2002 Niibe Yutaka - * + * Copyright (C) 2006 Paul Mundt */ #include +#include /* * clear_page_slow @@ -18,11 +18,11 @@ /* * r0 --- scratch * r4 --- to - * r5 --- to + 4096 + * r5 --- to + PAGE_SIZE */ ENTRY(clear_page_slow) mov r4,r5 - mov.w .Llimit,r0 + mov.l .Llimit,r0 add r0,r5 mov #0,r0 ! @@ -50,7 +50,7 @@ ENTRY(clear_page_slow) ! rts nop -.Llimit: .word (4096-28) +.Llimit: .long (PAGE_SIZE-28) ENTRY(__clear_user) ! @@ -164,10 +164,10 @@ ENTRY(__clear_user) * r0 --- scratch * r4 --- to * r5 --- orig_to - * r6 --- to + 4096 + * r6 --- to + PAGE_SIZE */ ENTRY(__clear_user_page) - mov.w .L4096,r0 + mov.l .Lpsz,r0 mov r4,r6 add r0,r6 mov #0,r0 @@ -191,7 +191,7 @@ ENTRY(__clear_user_page) ! rts nop -.L4096: .word 4096 +.Lpsz: .long PAGE_SIZE #endif diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S index 1addffe117c..397c94c9731 100644 --- a/arch/sh/mm/copy_page.S +++ b/arch/sh/mm/copy_page.S @@ -1,12 +1,12 @@ -/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $ - * +/* * copy_page, __copy_user_page, __copy_user implementation of SuperH * * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima * Copyright (C) 2002 Toshinobu Sugioka - * + * Copyright (C) 2006 Paul Mundt */ #include +#include /* * copy_page_slow @@ -18,7 +18,7 @@ /* * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch - * r8 --- from + 4096 + * r8 --- from + PAGE_SIZE * r9 --- not used * r10 --- to * r11 --- from @@ -30,7 +30,7 @@ ENTRY(copy_page_slow) mov r4,r10 mov r5,r11 mov r5,r8 - mov.w .L4096,r0 + mov.l .Lpsz,r0 add r0,r8 ! 1: mov.l @r11+,r0 @@ -80,7 +80,7 @@ ENTRY(copy_page_slow) /* * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch - * r8 --- from + 4096 + * r8 --- from + PAGE_SIZE * r9 --- orig_to * r10 --- to * r11 --- from @@ -94,7 +94,7 @@ ENTRY(__copy_user_page) mov r5,r11 mov r6,r9 mov r5,r8 - mov.w .L4096,r0 + mov.l .Lpsz,r0 add r0,r8 ! 
 1:	ocbi	@r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
 	rts
 	 nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 462bfeac6d9..59f4cc18235 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -217,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 	extern unsigned long memory_start;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e348c..bb23679369d 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
 
 static void clear_page_dma(void *to)
 {
-	extern unsigned long empty_zero_page[1024];
-
 	/*
 	 * We get invoked quite early on, if the DMAC hasn't been initialized
 	 * yet, fall back on the slow manual implementation.
-- 
cgit v1.2.3


From bca7c20764c83a44c7b8b0831089922d56a3a9a2 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 1 Dec 2006 12:14:11 +0900
Subject: sh: Get the PGD right in oops case with 64-bit PTEs.

Previously this was using a static pgd shift in the reporting code,
simply flip this to PGDIR_SHIFT which does the right thing depending
on varying PTE magnitudes on the SH-X2 MMU.

While we're at it, and since it's been recently added, use get_TTB()
for fetching the TTB, rather than the open coded instructions.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/fault.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 123fb80c859..cfeefc10e25 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -174,11 +174,9 @@ no_context:
 	printk(KERN_ALERT "Unable to handle kernel paging request");
 	printk(" at virtual address %08lx\n", address);
 	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
 	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
 		printk(KERN_ALERT "*pde = %08lx\n", page);
 		if (page & _PAGE_PRESENT) {
 			page &= PAGE_MASK;
-- 
cgit v1.2.3


From afbfb52e47273a440df33274452c603e8c332de2 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 4 Dec 2006 18:17:28 +0900
Subject: sh: stacktrace/lockdep/irqflags tracing support.

Wire up all of the essentials for lockdep..

Signed-off-by: Paul Mundt
---
 arch/sh/mm/fault.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index cfeefc10e25..716ebf568af 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -37,6 +37,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	int si_code;
 	siginfo_t info;
 
+	trace_hardirqs_on();
+	local_irq_enable();
+
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
 		kgdb_bus_err_hook();
-- 
cgit v1.2.3


From ea0f8feaa041f3ccec3d6b8ee51325b177daef06 Mon Sep 17 00:00:00 2001
From: Jamie Lenehan
Date: Wed, 6 Dec 2006 12:05:02 +0900
Subject: sh: sh775x/titan fixes for irq header changes.

The following moves the creation of IPR interrupts into setup-7750.c
and updates a few other things to make it all work after the "Drop
CPU subtype IRQ headers" commit. It boots and runs fine on my titan
board.

- adds an ipr_idx to the ipr_data and uses a function in the subtype
  code to calculate the address of the IPR registers

- adds a function to enable individual interrupt mode for externals
  in the subtype code and calls that from the titan board code
  instead of doing it directly.

- I changed the shift in the ipr_data to be the actual # of bits to
  shift, instead of the number / 4 - made it easier to match with
  the manual.

Signed-off-by: Jamie Lenehan
Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 6cd6d0045d1..4e0362f5038 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -104,6 +104,7 @@ comment "SH-4 Processor Support"
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7750 if you have a 200 MHz SH-4 HD6417750 CPU.
 
@@ -119,15 +120,18 @@ config CPU_SUBTYPE_SH7750R
 	bool "Support SH7750R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7751 if you have a 166 MHz SH-4 HD6417751 CPU,
 	  or if you have a HD6417751R CPU.
@@ -136,6 +140,7 @@ config CPU_SUBTYPE_SH7751R
 	bool "Support SH7751R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7751
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
-- 
cgit v1.2.3


From 39dde65c9940c97fcd178a3d2b1c57ed8b7b68aa Mon Sep 17 00:00:00 2001
From: "Chen, Kenneth W"
Date: Wed, 6 Dec 2006 20:32:03 -0800
Subject: [PATCH] shared page table for hugetlb page

Following up with the work on shared page table done by Dave McCracken.
This set of patches targets shared page tables for hugetlb memory only.

The shared page table is particularly useful in the situation of large
number of independent processes sharing large shared memory segments.
In the normal page case, the amount of memory saved from process' page
table is quite significant. For hugetlb, the saving on page table memory
is not the primary objective (as hugetlb itself already cuts down page
table overhead significantly), instead, the purpose of using shared page
table on hugetlb is to allow faster TLB refill and smaller cache pollution
upon TLB miss.

With PT sharing, pte entries are shared among hundreds of processes, the
cache consumption used by all the page table is smaller and in return,
application gets much higher cache hit ratio. One other effect is that
cache hit ratio with hardware page walker hitting on pte in cache will be
higher and this helps to reduce TLB miss latency. These two effects
contribute to higher application performance.

Signed-off-by: Ken Chen
Acked-by: Hugh Dickins
Cc: Dave McCracken
Cc: William Lee Irwin III
Cc: "Luck, Tony"
Cc: Paul Mackerras
Cc: Benjamin Herrenschmidt
Cc: David Gibson
Cc: Adam Litke
Cc: Paul Mundt
Cc: "David S. Miller"
Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/mm/hugetlbpage.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sh/mm') diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 329059d6b54..cf2c2ee35a3 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -63,6 +63,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) return pte; } +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ + return 0; +} + struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { -- cgit v1.2.3 From e18b890bb0881bbab6f4f1a6cd20d9c60d66b003 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 6 Dec 2006 20:33:20 -0800 Subject: [PATCH] slab: remove kmem_cache_t Replace all uses of kmem_cache_t with struct kmem_cache. The patch was generated using the following script: #!/bin/sh # # Replace one string by another in all the kernel sources. # set -e for file in `find * -name "*.c" -o -name "*.h"|xargs grep -l $1`; do quilt add $file sed -e "1,\$s/$1/$2/g" $file >/tmp/$$ mv /tmp/$$ $file quilt refresh done The script was run like this sh replace kmem_cache_t "struct kmem_cache" Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/mm/pmb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sh/mm') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 92e745341e4..b60ad83a763 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -30,7 +30,7 @@ #define NR_PMB_ENTRIES 16 -static kmem_cache_t *pmb_cache; +static struct kmem_cache *pmb_cache; static unsigned long pmb_map; static struct pmb_entry pmb_init_map[] = { @@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr) } while (pmbe); } -static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags) +static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags) { struct pmb_entry *pmbe = pmb; @@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags) spin_unlock_irq(&pmb_list_lock); } -static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags) +static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags) { spin_lock_irq(&pmb_list_lock); pmb_list_del(pmb); -- cgit v1.2.3 From 5b3e1a85c2145813898ac50530c70e6d03a6aa19 Mon Sep 17 00:00:00 2001 From: Haavard Skinnemoen Date: Fri, 8 Dec 2006 02:38:07 -0800 Subject: [PATCH] Generic ioremap_page_range: sh conversion Convert SH to use generic ioremap_page_range() Signed-off-by: Haavard Skinnemoen Signed-off-by: Paul Mundt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/mm/ioremap.c | 90 +++------------------------------------------------- 1 file changed, 4 insertions(+), 86 deletions(-) (limited to 'arch/sh/mm') diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index 11d54c14982..90b494a0cf4 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c @@ -16,97 +16,13 @@ #include #include #include -#include +#include #include #include #include #include #include -static inline void remap_area_pte(pte_t * pte, unsigned long address, - unsigned long size, unsigned long phys_addr, unsigned long flags) -{ - unsigned long end; - unsigned long pfn; - pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); - - address &= ~PMD_MASK; - end = address + size; - if (end > PMD_SIZE) - end = PMD_SIZE; - if (address >= end) - BUG(); - pfn = 
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, pgprot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
-	unsigned long size, unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-int remap_area_pages(unsigned long address, unsigned long phys_addr,
-		     unsigned long size, unsigned long flags)
-{
-	int error;
-	pgd_t * dir;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	dir = pgd_offset_k(address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
-	do {
-		pud_t *pud;
-		pmd_t *pmd;
-
-		error = -ENOMEM;
-
-		pud = pud_alloc(&init_mm, dir, address);
-		if (!pud)
-			break;
-		pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			break;
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -121,6 +37,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 {
 	struct vm_struct * area;
 	unsigned long offset, last_addr, addr, orig_addr;
+	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -190,8 +107,9 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	}
 #endif
 
+	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
-		if (remap_area_pages(addr, phys_addr, size, flags)) {
+		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);
 			return NULL;
 		}
-- 
cgit v1.2.3


From 11cbb70ea326e8ec78b2beb2b0c85c9ec71c279b Mon Sep 17 00:00:00 2001
From: Yoshinori Sato
Date: Thu, 7 Dec 2006 18:07:27 +0900
Subject: sh: Trivial build fixes for SH-2 support.

Signed-off-by: Yoshinori Sato
Signed-off-by: Paul Mundt
---
 arch/sh/mm/init.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 59f4cc18235..29bd37b1488 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -77,6 +77,7 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
+#ifdef CONFIG_MMU
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
 	pgd_t *pgd;
@@ -139,6 +140,7 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 
 	set_pte_phys(address, phys, prot);
 }
+#endif	/* CONFIG_MMU */
 
 /* References to section boundaries */
-- 
cgit v1.2.3


From 37bda1da4570c2e9c6dd34e77d2120218e384950 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 9 Dec 2006 09:16:12 +0900
Subject: sh: Convert remaining remap_area_pages() users to ioremap_page_range().

A couple of these were missed.

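The shape of the conversion at a call site, in generic sketch form (the
cache-sh4.c hunk below is the real instance; err and vaddr are
illustrative names):

	/* before: SH-private helper taking raw PTE flag bits */
	err = remap_area_pages(vaddr, phys, size, flags);

	/* after: generic helper taking an explicit end address and pgprot_t */
	err = ioremap_page_range(vaddr, vaddr + size, phys,
				 __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags));
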
Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh4.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index ae531affccb..c6955157c98 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -107,7 +107,7 @@ void __init p3_cache_init(void)
 
 	emit_cache_params();
 
-	if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
+	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
 		panic("%s failed.", __FUNCTION__);
 
 	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-- 
cgit v1.2.3


From 41504c39726a7099e5a42508dd57fe561c8b4129 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 11 Dec 2006 20:28:03 +0900
Subject: sh: SH-MobileR SH7722 CPU support.

This adds CPU support for the SH7722.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/Kconfig | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 4e0362f5038..29f4ee35c6d 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -35,6 +35,9 @@ config CPU_SUBTYPE_ST40
 	select CPU_SH4
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SHX2
+	bool
+
 #
 # Processor subtypes
 #
@@ -180,6 +183,7 @@ config CPU_SUBTYPE_SH7780
 config CPU_SUBTYPE_SH7785
 	bool "Support SH7785 processor"
 	select CPU_SH4A
+	select CPU_SHX2
 	select CPU_HAS_INTC2_IRQ
 
 comment "SH4AL-DSP Processor Support"
@@ -192,6 +196,12 @@ config CPU_SUBTYPE_SH7343
 	bool "Support SH7343 processor"
 	select CPU_SH4AL_DSP
 
+config CPU_SUBTYPE_SH7722
+	bool "Support SH7722 processor"
+	select CPU_SH4AL_DSP
+	select CPU_SHX2
+	select CPU_HAS_IPR_IRQ
+
 endmenu
 
 menu "Memory management options"
@@ -250,7 +260,7 @@ config 32BIT
 
 config X2TLB
 	bool "Enable extended TLB mode"
-	depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+	depends on CPU_SHX2 && MMU && EXPERIMENTAL
 	help
 	  Selecting this option will enable the extended mode of the SH-X2
 	  TLB. For legacy SH-X behaviour and interoperability, say N. For
-- 
cgit v1.2.3