Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig            |  17
-rw-r--r--  arch/arm/mm/Makefile           |   2
-rw-r--r--  arch/arm/mm/copypage-v6.c      |   6
-rw-r--r--  arch/arm/mm/copypage-xscale.S  | 113
-rw-r--r--  arch/arm/mm/copypage-xscale.c  | 131
-rw-r--r--  arch/arm/mm/fault-armv.c       |  31
-rw-r--r--  arch/arm/mm/flush.c            |  44
-rw-r--r--  arch/arm/mm/ioremap.c          |  47
-rw-r--r--  arch/arm/mm/minicache.c        |  73
9 files changed, 218 insertions, 246 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 48bac7da8c7..95606b4a3ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -62,7 +62,7 @@ config CPU_ARM720T
# ARM920T
config CPU_ARM920T
bool "Support ARM920T processor" if !ARCH_S3C2410
- depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX
+ depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX || ARCH_AAEC2000
default y if ARCH_S3C2410
select CPU_32v4
select CPU_ABRT_EV4T
@@ -228,7 +228,6 @@ config CPU_SA1100
select CPU_CACHE_V4WB
select CPU_CACHE_VIVT
select CPU_TLB_V4WB
- select CPU_MINICACHE
# XScale
config CPU_XSCALE
@@ -239,7 +238,6 @@ config CPU_XSCALE
select CPU_ABRT_EV5T
select CPU_CACHE_VIVT
select CPU_TLB_V4WBI
- select CPU_MINICACHE
# ARMv6
config CPU_V6
@@ -345,11 +343,6 @@ config CPU_TLB_V4WBI
config CPU_TLB_V6
bool
-config CPU_MINICACHE
- bool
- help
- Processor has a minicache.
-
comment "Processor Features"
config ARM_THUMB
@@ -429,3 +422,11 @@ config HAS_TLS_REG
assume directly accessing that register and always obtain the
expected value only on ARMv7 and above.
+config NEEDS_SYSCALL_FOR_CMPXCHG
+ bool
+ default y if SMP && (CPU_32v5 || CPU_32v4 || CPU_32v3)
+ help
+ SMP on a pre-ARMv6 processor? Well OK then.
+ Forget about fast user space cmpxchg support.
+ It is just not possible.
+
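The new symbol records a hard limit rather than enabling anything: pre-ARMv6 cores lack ldrex/strex, so on SMP there is no lock-free user-space compare-and-swap and the kernel has to arbitrate. A minimal user-space sketch of the fast path this rules out, assuming the era's fixed-address kuser cmpxchg helper (0xffff0fc0, returning zero on success); the helper address and return convention are assumptions here, not part of this patch:

    /* Sketch only: assumes the kuser cmpxchg helper at 0xffff0fc0,
     * which returns 0 when *ptr was updated and non-zero on failure.
     * On pre-ARMv6 SMP no such lock-free path exists, which is what
     * NEEDS_SYSCALL_FOR_CMPXCHG captures. */
    typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)

    static int atomic_add_return(int val, volatile int *ptr)
    {
            int old, new;
            do {
                    old = *ptr;             /* snapshot current value */
                    new = old + val;
            } while (kuser_cmpxchg(old, new, ptr) != 0);  /* retry on race */
            return new;
    }
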
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index ccf316c11e0..59f47d4c2df 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -31,8 +31,6 @@ obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
-obj-$(CONFIG_CPU_MINICACHE) += minicache.o
-
obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a8c00236bd3..27d041574ea 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -30,8 +30,6 @@
static DEFINE_SPINLOCK(v6_lock);
-#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
-
/*
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
@@ -55,7 +53,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
*/
void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
- unsigned int offset = DCACHE_COLOUR(vaddr);
+ unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long from, to;
/*
@@ -95,7 +93,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
*/
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
- unsigned int offset = DCACHE_COLOUR(vaddr);
+ unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
/*
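Both hunks above switch from the file-local DCACHE_COLOUR macro (deleted at the top of the file) to the generic CACHE_COLOUR, which computes the same thing: the alias "colour" of a user virtual address within its SHMLBA window. A standalone sketch of the computation, assuming SHMLBA = 4 * PAGE_SIZE as on aliasing ARMv6 caches:

    /* Standalone colour computation; SHMLBA = 4 * PAGE_SIZE (a 16KB
     * four-way aliasing window) is an assumption matching ARMv6. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define SHMLBA     (4 * PAGE_SIZE)
    #define CACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

    int main(void)
    {
            /* 0x40003000 sits in the fourth page of its 16KB window */
            printf("%lu\n", CACHE_COLOUR(0x40003000UL));    /* prints 3 */
            return 0;
    }
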
diff --git a/arch/arm/mm/copypage-xscale.S b/arch/arm/mm/copypage-xscale.S
deleted file mode 100644
index bb277316ef5..00000000000
--- a/arch/arm/mm/copypage-xscale.S
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage-xscale.S
- *
- * Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/constants.h>
-
-/*
- * General note:
- * We don't really want write-allocate cache behaviour for these functions
- * since that will just eat through 8K of the cache.
- */
-
- .text
- .align 5
-/*
- * XScale optimised copy_user_page
- * r0 = destination
- * r1 = source
- * r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- * What we could do is use the mini-cache to buffer reads from the source
- * page. We rely on the mini-cache being smaller than one page, so we'll
- * cycle through the complete cache anyway.
- */
-ENTRY(xscale_mc_copy_user_page)
- stmfd sp!, {r4, r5, lr}
- mov r5, r0
- mov r0, r1
- bl map_page_minicache
- mov r1, r5
- mov lr, #PAGE_SZ/64-1
-
- /*
- * Strangely enough, best performance is achieved
- * when prefetching destination as well. (NP)
- */
- pld [r0, #0]
- pld [r0, #32]
- pld [r1, #0]
- pld [r1, #32]
-
-1: pld [r0, #64]
- pld [r0, #96]
- pld [r1, #64]
- pld [r1, #96]
-
-2: ldrd r2, [r0], #8
- ldrd r4, [r0], #8
- mov ip, r1
- strd r2, [r1], #8
- ldrd r2, [r0], #8
- strd r4, [r1], #8
- ldrd r4, [r0], #8
- strd r2, [r1], #8
- strd r4, [r1], #8
- mcr p15, 0, ip, c7, c10, 1 @ clean D line
- ldrd r2, [r0], #8
- mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
- ldrd r4, [r0], #8
- mov ip, r1
- strd r2, [r1], #8
- ldrd r2, [r0], #8
- strd r4, [r1], #8
- ldrd r4, [r0], #8
- strd r2, [r1], #8
- strd r4, [r1], #8
- mcr p15, 0, ip, c7, c10, 1 @ clean D line
- subs lr, lr, #1
- mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
- bgt 1b
- beq 2b
-
- ldmfd sp!, {r4, r5, pc}
-
- .align 5
-/*
- * XScale optimised clear_user_page
- * r0 = destination
- * r1 = virtual user address of ultimate destination page
- */
-ENTRY(xscale_mc_clear_user_page)
- mov r1, #PAGE_SZ/32
- mov r2, #0
- mov r3, #0
-1: mov ip, r0
- strd r2, [r0], #8
- strd r2, [r0], #8
- strd r2, [r0], #8
- strd r2, [r0], #8
- mcr p15, 0, ip, c7, c10, 1 @ clean D line
- subs r1, r1, #1
- mcr p15, 0, ip, c7, c6, 1 @ invalidate D line
- bne 1b
- mov pc, lr
-
- __INITDATA
-
- .type xscale_mc_user_fns, #object
-ENTRY(xscale_mc_user_fns)
- .long xscale_mc_clear_user_page
- .long xscale_mc_copy_user_page
- .size xscale_mc_user_fns, . - xscale_mc_user_fns
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
new file mode 100644
index 00000000000..42a6ee255ce
--- /dev/null
+++ b/arch/arm/mm/copypage-xscale.c
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/arm/mm/copypage-xscale.c
+ *
+ * Copyright (C) 1995-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles the mini data cache, as found on SA11x0 and XScale
+ * processors. When we copy a user page, we map it in such a way
+ * that accesses to this page will not touch the main data cache, but
+ * will be cached in the mini data cache. This prevents us thrashing
+ * the main data cache on page faults.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
+ * specific hacks for copying pages efficiently.
+ */
+#define COPYPAGE_MINICACHE 0xffff8000
+
+#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
+ L_PTE_CACHEABLE)
+
+#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
+
+static DEFINE_SPINLOCK(minicache_lock);
+
+/*
+ * XScale mini-dcache optimised copy_user_page
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address. Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue. The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ */
+static void __attribute__((naked))
+mc_copy_user_page(void *from, void *to)
+{
+ /*
+ * Strangely enough, best performance is achieved
+ * when prefetching destination as well. (NP)
+ */
+ asm volatile(
+ "stmfd sp!, {r4, r5, lr} \n\
+ mov lr, %2 \n\
+ pld [r0, #0] \n\
+ pld [r0, #32] \n\
+ pld [r1, #0] \n\
+ pld [r1, #32] \n\
+1: pld [r0, #64] \n\
+ pld [r0, #96] \n\
+ pld [r1, #64] \n\
+ pld [r1, #96] \n\
+2: ldrd r2, [r0], #8 \n\
+ ldrd r4, [r0], #8 \n\
+ mov ip, r1 \n\
+ strd r2, [r1], #8 \n\
+ ldrd r2, [r0], #8 \n\
+ strd r4, [r1], #8 \n\
+ ldrd r4, [r0], #8 \n\
+ strd r2, [r1], #8 \n\
+ strd r4, [r1], #8 \n\
+ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
+ ldrd r2, [r0], #8 \n\
+ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
+ ldrd r4, [r0], #8 \n\
+ mov ip, r1 \n\
+ strd r2, [r1], #8 \n\
+ ldrd r2, [r0], #8 \n\
+ strd r4, [r1], #8 \n\
+ ldrd r4, [r0], #8 \n\
+ strd r2, [r1], #8 \n\
+ strd r4, [r1], #8 \n\
+ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
+ subs lr, lr, #1 \n\
+ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
+ bgt 1b \n\
+ beq 2b \n\
+ ldmfd sp!, {r4, r5, pc} "
+ :
+ : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+ spin_lock(&minicache_lock);
+
+ set_pte(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot));
+ flush_tlb_kernel_page(COPYPAGE_MINICACHE);
+
+ mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
+
+ spin_unlock(&minicache_lock);
+}
+
+/*
+ * XScale optimised clear_user_page
+ */
+void __attribute__((naked))
+xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+ asm volatile(
+ "mov r1, %0 \n\
+ mov r2, #0 \n\
+ mov r3, #0 \n\
+1: mov ip, r0 \n\
+ strd r2, [r0], #8 \n\
+ strd r2, [r0], #8 \n\
+ strd r2, [r0], #8 \n\
+ strd r2, [r0], #8 \n\
+ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
+ subs r1, r1, #1 \n\
+ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
+ bne 1b \n\
+ mov pc, lr"
+ :
+ : "I" (PAGE_SIZE / 32));
+}
+
+struct cpu_user_fns xscale_mc_user_fns __initdata = {
+ .cpu_clear_user_page = xscale_mc_clear_user_page,
+ .cpu_copy_user_page = xscale_mc_copy_user_page,
+};
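The xscale_mc_user_fns structure replaces the assembly-exported vector at the end of the deleted .S file. A hedged sketch of how such a structure is consumed; the field and macro names below are recalled from this era's include/asm-arm/page.h and should be treated as assumptions, not part of this patch:

    /* Assumed dispatch shape for multi-CPU user-page operations;
     * verify against include/asm-arm/page.h of the tree this
     * patch applies to. */
    struct cpu_user_fns {
            void (*cpu_clear_user_page)(void *p, unsigned long user);
            void (*cpu_copy_user_page)(void *to, const void *from,
                                       unsigned long user);
    };

    extern struct cpu_user_fns cpu_user;

    #define clear_user_page(addr, vaddr, pg) \
            cpu_user.cpu_clear_user_page(addr, vaddr)
    #define copy_user_page(to, from, vaddr, pg) \
            cpu_user.cpu_copy_user_page(to, from, vaddr)
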
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 01967ddeef5..be4ab3d73c9 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -77,9 +77,8 @@ no_pmd:
}
static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
- struct address_space *mapping = page_mapping(page);
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
@@ -87,9 +86,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
pgoff_t pgoff;
int aliases = 0;
- if (!mapping)
- return;
-
pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
/*
@@ -115,9 +111,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
if (aliases)
adjust_pte(vma, addr);
else
- flush_cache_page(vma, addr, page_to_pfn(page));
+ flush_cache_page(vma, addr, pfn);
}
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
/*
* Take care of architecture specific things when placing a new PTE into
* a page table, or changing an existing PTE. Basically, there are two
@@ -134,29 +132,22 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
+ struct address_space *mapping;
struct page *page;
if (!pfn_valid(pfn))
return;
+
page = pfn_to_page(pfn);
- if (page_mapping(page)) {
+ mapping = page_mapping(page);
+ if (mapping) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
- if (dirty) {
- /*
- * This is our first userspace mapping of this page.
- * Ensure that the physical page is coherent with
- * the kernel mapping.
- *
- * FIXME: only need to do this on VIVT and aliasing
- * VIPT cache architectures. We can do that
- * by choosing whether to set this bit...
- */
- __cpuc_flush_dcache_page(page_address(page));
- }
+ if (dirty)
+ __flush_dcache_page(mapping, page);
if (cache_is_vivt())
- make_coherent(vma, addr, page, dirty);
+ make_coherent(mapping, vma, addr, pfn);
}
}
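make_coherent() exists because a VIVT cache indexes lines by virtual address: two user mappings of the same physical page can each hold a private, stale copy of its data. A user-space illustration of the situation being guarded against (ordinary POSIX code, not part of the patch):

    /* Two MAP_SHARED mappings of one page. On a VIVT cache the write
     * through 'a' and the read through 'b' hit different cache lines;
     * the kernel must flush or uncache one alias (adjust_pte) to keep
     * them coherent. */
    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/alias", O_RDWR | O_CREAT, 0600);
            ftruncate(fd, 4096);
            char *a = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            char *b = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            a[0] = 1;       /* dirties a line at a's virtual index */
            return b[0];    /* reads through a different virtual index */
    }
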
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 4085ed983e4..191788fb18d 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -37,13 +37,8 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
-static void __flush_dcache_page(struct address_space *mapping, struct page *page)
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *mpnt;
- struct prio_tree_iter iter;
- pgoff_t pgoff;
-
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
@@ -52,24 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
__cpuc_flush_dcache_page(page_address(page));
/*
- * If there's no mapping pointer here, then this page isn't
- * visible to userspace yet, so there are no cache lines
- * associated with any other aliases.
- */
- if (!mapping)
- return;
-
- /*
- * This is a page cache page. If we have a VIPT cache, we
- * only need to do one flush - which would be at the relevant
+ * If this is a page cache page, and we have an aliasing VIPT cache,
+ * we only need to do one flush - which would be at the relevant
* userspace colour, which is congruent with page->index.
*/
- if (cache_is_vipt()) {
- if (cache_is_vipt_aliasing())
- flush_pfn_alias(page_to_pfn(page),
- page->index << PAGE_CACHE_SHIFT);
- return;
- }
+ if (mapping && cache_is_vipt_aliasing())
+ flush_pfn_alias(page_to_pfn(page),
+ page->index << PAGE_CACHE_SHIFT);
+}
+
+static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+{
+ struct mm_struct *mm = current->active_mm;
+ struct vm_area_struct *mpnt;
+ struct prio_tree_iter iter;
+ pgoff_t pgoff;
/*
* There are possible user space mappings of this page:
@@ -116,12 +108,12 @@ void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
- if (cache_is_vipt_nonaliasing())
- return;
-
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
- else
+ else {
__flush_dcache_page(mapping, page);
+ if (mapping && cache_is_vivt())
+ __flush_dcache_aliases(mapping, page);
+ }
}
EXPORT_SYMBOL(flush_dcache_page);
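The split leaves flush_dcache_page()'s external contract intact: kernel code that writes to a page-cache page through the kernel mapping must still call it, and the function decides whether to flush immediately or defer via PG_dcache_dirty as in the hunk above. A minimal caller-side sketch under that assumption:

    /* Caller-side contract: write via the kernel mapping, then let
     * flush_dcache_page() resolve (or defer) the alias flushing. */
    #include <linux/mm.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    static void fill_page(struct page *page, const void *src, size_t len)
    {
            void *dst = kmap(page);         /* kernel mapping of the page */
            memcpy(dst, src, len);
            flush_dcache_page(page);        /* flush now or mark dirty */
            kunmap(page);
    }
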
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 00bb8fd37a5..7110e54182b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -170,3 +170,50 @@ void __iounmap(void __iomem *addr)
vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(__iounmap);
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap(start, len);
+ return ioremap_nocache(start, len);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ if ((unsigned long)addr >= VMALLOC_START &&
+ (unsigned long)addr < VMALLOC_END)
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
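A hedged usage sketch for the new pci_iomap()/pci_iounmap() pair, shaped as a hypothetical probe routine; the driver name, BAR number, and register offset are assumptions for illustration:

    /* Hypothetical probe using the pci_iomap() added above. A maxlen
     * of 0 maps the whole BAR; ioread32() works whether the BAR was
     * I/O-port or memory mapped, which is the point of the API. */
    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <asm/io.h>

    static int demo_probe(struct pci_dev *dev, const struct pci_device_id *id)
    {
            void __iomem *regs;

            if (pci_enable_device(dev))
                    return -ENODEV;

            regs = pci_iomap(dev, 0, 0);    /* map all of BAR 0 */
            if (!regs)
                    return -ENOMEM;

            (void)ioread32(regs);           /* e.g. read an ID register */
            pci_iounmap(dev, regs);
            return 0;
    }
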
diff --git a/arch/arm/mm/minicache.c b/arch/arm/mm/minicache.c
deleted file mode 100644
index dedf2ab01b2..00000000000
--- a/arch/arm/mm/minicache.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * linux/arch/arm/mm/minicache.c
- *
- * Copyright (C) 2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles the mini data cache, as found on SA11x0 and XScale
- * processors. When we copy a user page page, we map it in such a way
- * that accesses to this page will not touch the main data cache, but
- * will be cached in the mini data cache. This prevents us thrashing
- * the main data cache on page faults.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-
-/*
- * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
- * specific hacks for copying pages efficiently.
- */
-#define minicache_address (0xffff8000)
-#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
- L_PTE_CACHEABLE)
-
-static pte_t *minicache_pte;
-
-/*
- * Note that this is intended to be called only from the copy_user_page
- * asm code; anything else will require special locking to prevent the
- * mini-cache space being re-used. (Note: probably preempt unsafe).
- *
- * We rely on the fact that the minicache is 2K, and we'll be pushing
- * 4K of data through it, so we don't actually have to specifically
- * flush the minicache when we change the mapping.
- *
- * Note also: assert(PAGE_OFFSET <= virt < high_memory).
- * Unsafe: preempt, kmap.
- */
-unsigned long map_page_minicache(unsigned long virt)
-{
- set_pte(minicache_pte, pfn_pte(__pa(virt) >> PAGE_SHIFT, minicache_pgprot));
- flush_tlb_kernel_page(minicache_address);
-
- return minicache_address;
-}
-
-static int __init minicache_init(void)
-{
- pgd_t *pgd;
- pmd_t *pmd;
-
- spin_lock(&init_mm.page_table_lock);
-
- pgd = pgd_offset_k(minicache_address);
- pmd = pmd_alloc(&init_mm, pgd, minicache_address);
- if (!pmd)
- BUG();
- minicache_pte = pte_alloc_kernel(&init_mm, pmd, minicache_address);
- if (!minicache_pte)
- BUG();
-
- spin_unlock(&init_mm.page_table_lock);
-
- return 0;
-}
-
-core_initcall(minicache_init);