| author    | Tony Luck <tony.luck@intel.com> | 2005-05-17 09:10:20 -0700 |
|-----------|---------------------------------|---------------------------|
| committer | Tony Luck <tony.luck@intel.com> | 2005-05-17 09:10:20 -0700 |
| commit    | d0dac8082cbc2b234917ec53eb591b1ddbec80bb (patch) | |
| tree      | 105651af2db033de4803b91cc029381a1b9b39d2 | /arch/arm |
| parent    | bfd68594082d8384781c242aa72a7950b5cf51aa (diff) | |
| parent    | ff96b3d4b840e8aa126e0a60fd743417ffdee178 (diff) | |
Merge with linus
Diffstat (limited to 'arch/arm')

| mode       | file                            | lines changed |
|------------|---------------------------------|---------------|
| -rw-r--r-- | arch/arm/mach-s3c2410/clock.c   |   2 |
| -rw-r--r-- | arch/arm/mach-s3c2410/s3c2440.c |   6 |
| -rw-r--r-- | arch/arm/mm/Kconfig             |  21 |
| -rw-r--r-- | arch/arm/mm/copypage-v4mc.S     |  80 |
| -rw-r--r-- | arch/arm/mm/copypage-v4mc.c     | 111 |
| -rw-r--r-- | arch/arm/mm/copypage-v6.c       |  28 |
| -rw-r--r-- | arch/arm/mm/flush.c             |  37 |
| -rw-r--r-- | arch/arm/mm/mm-armv.c           |  27 |
8 files changed, 186 insertions, 126 deletions
```diff
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index e23f534d4e1..8d986b8401c 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -478,7 +478,7 @@ static int s3c2440_clk_add(struct sys_device *sysdev)
 {
 	unsigned long upllcon = __raw_readl(S3C2410_UPLLCON);
 
-	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate) * 2;
+	s3c2440_clk_upll.rate = s3c2410_get_pll(upllcon, clk_xtal.rate);
 
 	printk("S3C2440: Clock Support, UPLL %ld.%03ld MHz\n",
 	       print_mhz(s3c2440_clk_upll.rate));
diff --git a/arch/arm/mach-s3c2410/s3c2440.c b/arch/arm/mach-s3c2410/s3c2440.c
index 9a8cc5ae225..d4c8281b55f 100644
--- a/arch/arm/mach-s3c2410/s3c2440.c
+++ b/arch/arm/mach-s3c2410/s3c2440.c
@@ -192,9 +192,11 @@ void __init s3c2440_map_io(struct map_desc *mach_desc, int size)
 	iotable_init(s3c2440_iodesc, ARRAY_SIZE(s3c2440_iodesc));
 	iotable_init(mach_desc, size);
 
-	s3c_device_i2c.name = "s3c2440-i2c";
+	/* rename any peripherals used differing from the s3c2410 */
+
+	s3c_device_i2c.name  = "s3c2440-i2c";
+	s3c_device_nand.name = "s3c2440-nand";
 
 	/* change irq for watchdog */
@@ -225,7 +227,7 @@ void __init s3c2440_init_clocks(int xtal)
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_2:
-		hdiv = 1;
+		hdiv = 2;
 		break;
 
 	case S3C2440_CLKDIVN_HDIVN_4_8:
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4fc6be629d..48bac7da8c7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -412,21 +412,20 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
-	default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+	default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
 	help
-	  We might be running on an ARMv6+ processor which should have the TLS
-	  register but for some reason we can't use it, or maybe an SMP system
-	  using a pre-ARMv6 processor (there are apparently a few prototypes
-	  like that in existence) and therefore access to that register must
-	  be emulated.
+	  An SMP system using a pre-ARMv6 processor (there are apparently
+	  a few prototypes like that in existence) and therefore access to
+	  that required register must be emulated.
 
 config HAS_TLS_REG
 	bool
-	depends on CPU_32v6
-	default y if !TLS_REG_EMUL
+	depends on !TLS_REG_EMUL
+	default y if SMP || CPU_32v7
 	help
 	  This selects support for the CP15 thread register.
-	  It is defined to be available on ARMv6 or later.  If a particular
-	  ARMv6 or later CPU doesn't support it then it must include "select
-	  TLS_REG_EMUL" along with its other caracteristics.
+	  It is defined to be available on some ARMv6 processors (including
+	  all SMP capable ARMv6's) or later processors.  User space may
+	  assume directly accessing that register and always obtain the
+	  expected value only on ARMv7 and above.
diff --git a/arch/arm/mm/copypage-v4mc.S b/arch/arm/mm/copypage-v4mc.S
deleted file mode 100644
index 305af3dab3d..00000000000
--- a/arch/arm/mm/copypage-v4mc.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- *  linux/arch/arm/lib/copy_page-armv4mc.S
- *
- *  Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 mini-dcache optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address.  Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4_mc_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r4, r0
-	mov	r0, r1
-	bl	map_page_minicache
-	mov	r1, #PAGE_SZ/64			@ 1
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-1:	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4+1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r4, c7, c6, 1		@ 1   invalidate D line
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	stmia	r4!, {r2, r3, ip, lr}		@ 4
-	ldmneia	r0!, {r2, r3, ip, lr}		@ 4
-	bne	1b				@ 1
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4_mc_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4_mc_user_fns, #object
-ENTRY(v4_mc_user_fns)
-	.long	v4_mc_clear_user_page
-	.long	v4_mc_copy_user_page
-	.size	v4_mc_user_fns, . - v4_mc_user_fns
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
new file mode 100644
index 00000000000..fc69dccdace
--- /dev/null
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -0,0 +1,111 @@
+/*
+ *  linux/arch/arm/lib/copypage-armv4mc.S
+ *
+ *  Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors.  When we copy a user page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache.  This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+				  L_PTE_CACHEABLE)
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * ARMv4 mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction.  If your processor does not supply this, you have to write your
+ * own copy_user_page that does the right thing.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+	asm volatile(
+	"stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r4, %2				@ 1\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4+1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %1, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r4, r4, #1			@ 1\n\
+	stmia	%1!, {r2, r3, ip, lr}		@ 4\n\
+	ldmneia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+}
+
+void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	spin_lock(&minicache_lock);
+
+	set_pte(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+	flush_tlb_kernel_page(0xffff8000);
+
+	mc_copy_user_page((void *)0xffff8000, kto);
+
+	spin_unlock(&minicache_lock);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ */
+void __attribute__((naked))
+v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm volatile(
+	"str	lr, [sp, #-4]!\n\
+	mov	r1, %0				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	ldr	pc, [sp], #4"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+struct cpu_user_fns v4_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= v4_mc_clear_user_page,
+	.cpu_copy_user_page	= v4_mc_copy_user_page,
+};
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 694ac820885..a8c00236bd3 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -26,8 +26,8 @@
 #define to_address	(0xffffc000)
 #define to_pgprot	PAGE_KERNEL
 
-static pte_t *from_pte;
-static pte_t *to_pte;
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
 static DEFINE_SPINLOCK(v6_lock);
 
 #define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -74,8 +74,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
-	set_pte(to_pte + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
 
 	from = from_address + (offset << PAGE_SHIFT);
 	to = to_address + (offset << PAGE_SHIFT);
@@ -114,7 +114,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+	set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
 	flush_tlb_kernel_page(to);
 
 	clear_page((void *)to);
@@ -129,21 +129,6 @@ struct cpu_user_fns v6_user_fns __initdata = {
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		pgd_t *pgd;
-		pmd_t *pmd;
-
-		pgd = pgd_offset_k(from_address);
-		pmd = pmd_alloc(&init_mm, pgd, from_address);
-		if (!pmd)
-			BUG();
-		from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
-		if (!from_pte)
-			BUG();
-
-		to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
-		if (!to_pte)
-			BUG();
-
 		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
 		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
 	}
@@ -151,5 +136,4 @@ static int __init v6_userpage_init(void)
 	return 0;
 }
 
-__initcall(v6_userpage_init);
-
+core_initcall(v6_userpage_init);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6de48d8950..4085ed983e4 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,29 @@
 
 #include <asm/cacheflush.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+#define ALIAS_FLUSH_START	0xffff4000
+
+#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
+
+static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
+{
+	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+
+	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_kernel_page(to);
+
+	asm(	"mcrr	p15, 0, %1, %0, c14\n"
+	"	mcrr	p15, 0, %1, %0, c5\n"
+	    :
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "cc");
+}
+#else
+#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
+#endif
 
 static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
@@ -37,6 +60,18 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 		return;
 
 	/*
+	 * This is a page cache page.  If we have a VIPT cache, we
+	 * only need to do one flush - which would be at the relevant
+	 * userspace colour, which is congruent with page->index.
+	 */
+	if (cache_is_vipt()) {
+		if (cache_is_vipt_aliasing())
+			flush_pfn_alias(page_to_pfn(page),
+					page->index << PAGE_CACHE_SHIFT);
+		return;
+	}
+
+	/*
 	 * There are possible user space mappings of this page:
 	 * - VIVT cache: we need to also write back and invalidate all user
 	 *   data in the current VM view associated with this page.
@@ -57,8 +92,6 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
-		if (cache_is_vipt())
-			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 585dfb8e20b..2c2b93d77d4 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -37,6 +37,8 @@
 pgprot_t pgprot_kernel;
 
 EXPORT_SYMBOL(pgprot_kernel);
 
+pmd_t *top_pmd;
+
 struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
@@ -142,6 +144,16 @@ __setup("noalign", noalign_setup);
 
 #define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+	return pmd_offset(pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_off(pgd_offset_k(virt), virt);
+}
+
 /*
  * need to get a 16k page for level 1
  */
@@ -220,7 +232,7 @@ void free_pgd_slow(pgd_t *pgd)
 		return;
 
 	/* pgd is always present and good */
-	pmd = (pmd_t *)pgd;
+	pmd = pmd_off(pgd, 0);
 	if (pmd_none(*pmd))
 		goto free;
 	if (pmd_bad(*pmd)) {
@@ -246,9 +258,8 @@ free:
 static inline void
 alloc_init_section(unsigned long virt, unsigned long phys, int prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
 	if (virt & (1 << 20))
 		pmdp++;
 
@@ -283,11 +294,9 @@ alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
 static inline void
 alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
 {
-	pmd_t *pmdp;
+	pmd_t *pmdp = pmd_off_k(virt);
 	pte_t *ptep;
 
-	pmdp = pmd_offset(pgd_offset_k(virt), virt);
-
 	if (pmd_none(*pmdp)) {
 		unsigned long pmdval;
 		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
@@ -310,7 +319,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
  */
 static inline void clear_mapping(unsigned long virt)
 {
-	pmd_clear(pmd_offset(pgd_offset_k(virt), virt));
+	pmd_clear(pmd_off_k(virt));
 }
 
 struct mem_types {
@@ -578,7 +587,7 @@ void setup_mm_for_reboot(char mode)
 			PMD_TYPE_SECT;
 		if (cpu_arch <= CPU_ARCH_ARMv5)
 			pmdval |= PMD_BIT4;
-		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
 		flush_pmd_entry(pmd);
@@ -675,6 +684,8 @@ void __init memtable_init(struct meminfo *mi)
 
 	flush_cache_all();
 	flush_tlb_all();
+
+	top_pmd = pmd_off_k(0xffff0000);
 }
 
 /*
```
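The mm changes above all lean on one pattern: reserve a fixed kernel virtual address near the top of the address space, retarget its PTE at whatever physical page needs touching, invalidate the stale TLB entry, then operate on the page through that alias with the desired memory type. The sketch below distills that shape in kernel-style C. It only reuses names that appear in the diff itself (`top_pmd`, `pte_offset_kernel`, `set_pte`, `pfn_pte`, `flush_tlb_kernel_page`); the `with_page_alias()` wrapper, its lock, and the example address are hypothetical illustration, not code from the patch.

```c
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Defined in arch/arm/mm/mm-armv.c by this patch: the PMD covering the
 * top of the kernel address space, cached once in memtable_init(). */
extern pmd_t *top_pmd;

/* PTE slot for a fixed high address -- the same macro the patch adds to
 * copypage-v4mc.c, copypage-v6.c and flush.c. */
#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

/* Hypothetical alias slot; the patch itself uses 0xffff8000 (minicache
 * copy), 0xffffc000 (v6 copypage) and 0xffff4000 (VIPT alias flush). */
#define ALIAS_ADDR	0xffff8000UL

static DEFINE_SPINLOCK(alias_lock);	/* one slot, so callers serialise */

/*
 * Map the page at 'pfn' into the fixed alias slot with protection
 * 'prot', drop the stale translation, and run 'op' on the page through
 * the alias.
 */
static void with_page_alias(unsigned long pfn, pgprot_t prot,
			    void (*op)(void *))
{
	spin_lock(&alias_lock);

	set_pte(TOP_PTE(ALIAS_ADDR), pfn_pte(pfn, prot));
	flush_tlb_kernel_page(ALIAS_ADDR);	/* invalidate the old mapping */

	op((void *)ALIAS_ADDR);

	spin_unlock(&alias_lock);
}
```

`v4_mc_copy_user_page()`, `v6_copy_user_page_aliasing()` and `flush_pfn_alias()` are all instances of this shape, differing only in the fixed address and the pgprot (mini-data-cache cacheable, `PAGE_KERNEL`, and so on). Caching `top_pmd` once at boot is what lets the simple `TOP_PTE()` lookup replace the old `pte_alloc_kernel()` setup that this merge deletes from `v6_userpage_init()`.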