Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig               |  74
-rw-r--r--  arch/arm/mm/Makefile              |   3
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c   | 318
-rw-r--r--  arch/arm/mm/copypage-feroceon.S   |  95
-rw-r--r--  arch/arm/mm/fault-armv.c          |   4
-rw-r--r--  arch/arm/mm/flush.c               |   2
-rw-r--r--  arch/arm/mm/init.c                |   2
-rw-r--r--  arch/arm/mm/iomap.c               |   4
-rw-r--r--  arch/arm/mm/mmu.c                 |   1
-rw-r--r--  arch/arm/mm/proc-arm1020.S        |   1
-rw-r--r--  arch/arm/mm/proc-arm1020e.S       |   1
-rw-r--r--  arch/arm/mm/proc-arm1022.S        |   1
-rw-r--r--  arch/arm/mm/proc-arm1026.S        |   1
-rw-r--r--  arch/arm/mm/proc-arm6_7.S         |   2
-rw-r--r--  arch/arm/mm/proc-arm720.S         |   1
-rw-r--r--  arch/arm/mm/proc-arm740.S         |   1
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S       |   1
-rw-r--r--  arch/arm/mm/proc-arm920.S         |   1
-rw-r--r--  arch/arm/mm/proc-arm922.S         |   1
-rw-r--r--  arch/arm/mm/proc-arm925.S         |   3
-rw-r--r--  arch/arm/mm/proc-arm926.S         |   3
-rw-r--r--  arch/arm/mm/proc-arm940.S         |   3
-rw-r--r--  arch/arm/mm/proc-arm946.S         |   3
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S       |   1
-rw-r--r--  arch/arm/mm/proc-feroceon.S       | 299
-rw-r--r--  arch/arm/mm/proc-sa110.S          |   1
-rw-r--r--  arch/arm/mm/proc-sa1100.S         |   1
-rw-r--r--  arch/arm/mm/proc-v6.S             |  15
-rw-r--r--  arch/arm/mm/proc-v7.S             |   1
-rw-r--r--  arch/arm/mm/proc-xsc3.S           |   1
-rw-r--r--  arch/arm/mm/proc-xscale.S         |   1
31 files changed, 743 insertions(+), 103 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 76348f060f2..a2c8b006d71 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -18,6 +18,7 @@ config CPU_ARM610
select CPU_CP15_MMU
select CPU_COPY_V3 if MMU
select CPU_TLB_V3 if MMU
+ select CPU_PABRT_NOIFAR
help
The ARM610 is the successor to the ARM3 processor
and was produced by VLSI Technology Inc.
@@ -31,6 +32,7 @@ config CPU_ARM7TDMI
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4
help
A 32-bit RISC microprocessor based on the ARM7 processor core
@@ -49,6 +51,7 @@ config CPU_ARM710
select CPU_CP15_MMU
select CPU_COPY_V3 if MMU
select CPU_TLB_V3 if MMU
+ select CPU_PABRT_NOIFAR
help
A 32-bit RISC microprocessor based on the ARM7 processor core
designed by Advanced RISC Machines Ltd. The ARM710 is the
@@ -64,6 +67,7 @@ config CPU_ARM720T
default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X
select CPU_32v4T
select CPU_ABRT_LV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -82,6 +86,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V3 # although the core is v4t
select CPU_CP15_MPU
help
@@ -98,6 +103,7 @@ config CPU_ARM9TDMI
depends on !MMU
select CPU_32v4T
select CPU_ABRT_NOMMU
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4
help
A 32-bit RISC microprocessor based on the ARM9 processor core
@@ -113,6 +119,7 @@ config CPU_ARM920T
default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200
select CPU_32v4T
select CPU_ABRT_EV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -135,6 +142,7 @@ config CPU_ARM922T
default y if ARCH_LH7A40X || ARCH_KS8695
select CPU_32v4T
select CPU_ABRT_EV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -155,6 +163,7 @@ config CPU_ARM925T
default y if ARCH_OMAP15XX
select CPU_32v4T
select CPU_ABRT_EV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -171,10 +180,11 @@ config CPU_ARM925T
# ARM926T
config CPU_ARM926T
bool "Support ARM926T processor"
- depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
- default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
+ depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
+ default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || ARCH_NS9XXX || ARCH_DAVINCI
select CPU_32v5
select CPU_ABRT_EV5TJ
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_COPY_V4WB if MMU
@@ -193,6 +203,7 @@ config CPU_ARM940T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_NOMMU
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MPU
help
@@ -210,6 +221,7 @@ config CPU_ARM946E
depends on !MMU
select CPU_32v5
select CPU_ABRT_NOMMU
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MPU
help
@@ -226,6 +238,7 @@ config CPU_ARM1020
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -244,6 +257,7 @@ config CPU_ARM1020E
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WT
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -257,6 +271,7 @@ config CPU_ARM1022
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_COPY_V4WB if MMU # can probably do better
@@ -275,6 +290,7 @@ config CPU_ARM1026
depends on ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_COPY_V4WB if MMU # can probably do better
@@ -293,6 +309,7 @@ config CPU_SA110
select CPU_32v3 if ARCH_RPC
select CPU_32v4 if !ARCH_RPC
select CPU_ABRT_EV4
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WB
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -314,6 +331,7 @@ config CPU_SA1100
default y
select CPU_32v4
select CPU_ABRT_EV4
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V4WB
select CPU_CACHE_VIVT
select CPU_CP15_MMU
@@ -326,6 +344,7 @@ config CPU_XSCALE
default y
select CPU_32v5
select CPU_ABRT_EV5T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_TLB_V4WBI if MMU
@@ -337,6 +356,7 @@ config CPU_XSC3
default y
select CPU_32v5
select CPU_ABRT_EV5T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_TLB_V4WBI if MMU
@@ -345,14 +365,15 @@ config CPU_XSC3
# Feroceon
config CPU_FEROCEON
bool
- depends on ARCH_ORION
+ depends on ARCH_ORION5X || ARCH_LOKI || ARCH_KIRKWOOD || ARCH_MV78XX0
default y
select CPU_32v5
select CPU_ABRT_EV5T
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_VIVT
select CPU_CP15_MMU
- select CPU_COPY_V4WB if MMU
- select CPU_TLB_V4WBI if MMU
+ select CPU_COPY_FEROCEON if MMU
+ select CPU_TLB_FEROCEON if MMU
config CPU_FEROCEON_OLD_ID
bool "Accept early Feroceon cores with an ARM926 ID"
@@ -366,11 +387,12 @@ config CPU_FEROCEON_OLD_ID
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM7X00A
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM7X00A || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
default y if ARCH_MX3
default y if ARCH_MSM7X00A
select CPU_32v6
select CPU_ABRT_EV6
+ select CPU_PABRT_NOIFAR
select CPU_CACHE_V6
select CPU_CACHE_VIPT
select CPU_CP15_MMU
@@ -393,10 +415,11 @@ config CPU_32v6K
# ARMv7
config CPU_V7
bool "Support ARM V7 processor"
- depends on ARCH_INTEGRATOR
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v6K
select CPU_32v7
select CPU_ABRT_EV7
+ select CPU_PABRT_IFAR
select CPU_CACHE_V7
select CPU_CACHE_VIPT
select CPU_CP15_MMU
@@ -458,6 +481,12 @@ config CPU_ABRT_EV6
config CPU_ABRT_EV7
bool
+config CPU_PABRT_IFAR
+ bool
+
+config CPU_PABRT_NOIFAR
+ bool
+
# The cache model
config CPU_CACHE_V3
bool
@@ -494,6 +523,9 @@ config CPU_COPY_V4WT
config CPU_COPY_V4WB
bool
+config CPU_COPY_FEROCEON
+ bool
+
config CPU_COPY_V6
bool
@@ -519,6 +551,11 @@ config CPU_TLB_V4WBI
ARM Architecture Version 4 TLB with writeback cache and invalidate
instruction cache entry.
+config CPU_TLB_FEROCEON
+ bool
+ help
+ Feroceon TLB (v4wbi with non-outer-cachable page table walks).
+
config CPU_TLB_V6
bool
@@ -572,6 +609,13 @@ config ARM_THUMB
If you don't know what this all is, saying Y is a safe choice.
+config ARM_THUMBEE
+ bool "Enable ThumbEE CPU extension"
+ depends on CPU_V7
+ help
+ Say Y here if you have a CPU with the ThumbEE extension and code to
+ make use of it. Say N for code that can run on CPUs without ThumbEE.
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on ARCH_SUPPORTS_BIG_ENDIAN
@@ -622,7 +666,7 @@ config CPU_DCACHE_SIZE
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
- depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_FEROCEON) && !CPU_DCACHE_DISABLE
+ depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020) && !CPU_DCACHE_DISABLE
default y if CPU_ARM925T
help
Say Y here to use the data cache in writethrough mode. Unless you
@@ -670,6 +714,18 @@ config OUTER_CACHE
bool
default n
+config CACHE_FEROCEON_L2
+ bool "Enable the Feroceon L2 cache controller"
+ depends on ARCH_KIRKWOOD || ARCH_MV78XX0
+ default y
+ select OUTER_CACHE
+ help
+ This option enables the Feroceon L2 cache controller.
+
config CACHE_L2X0
- bool
+ bool "Enable the L2x0 outer cache controller"
+ depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
+ default y
select OUTER_CACHE
+ help
+ This option enables the L2x0 PrimeCell.
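
The new CPU_PABRT_IFAR / CPU_PABRT_NOIFAR symbols record whether a core can report the faulting address of a prefetch abort through an Instruction Fault Address Register (only the ARMv7 entry above selects CPU_PABRT_IFAR). Purely as an illustration of that distinction, and not code from this patch, the C sketch below shows the two ways a handler can obtain the address; the function name is hypothetical and the real handlers are assembly routines outside arch/arm/mm.

/*
 * Illustrative sketch, not part of this patch: how the faulting address
 * of a prefetch abort is obtained on the two classes of cores selected
 * by CPU_PABRT_IFAR / CPU_PABRT_NOIFAR. The function name is
 * hypothetical; the real handlers are assembly routines elsewhere.
 */
static inline unsigned long pabt_fault_address(unsigned long aborted_pc)
{
#ifdef CONFIG_CPU_PABRT_IFAR
	unsigned long ifar;

	/* ARMv7 Instruction Fault Address Register: cp15 c6, c0, 2. */
	__asm__("mrc p15, 0, %0, c6, c0, 2" : "=r" (ifar));
	return ifar;
#else
	/* No IFAR: fall back to the address of the aborted instruction. */
	return aborted_pc;
#endif
}
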
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 44536a0b995..f64b92557b1 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
+obj-$(CONFIG_CPU_COPY_FEROCEON) += copypage-feroceon.o
obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o context.o
obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
@@ -45,6 +46,7 @@ obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
+obj-$(CONFIG_CPU_TLB_FEROCEON) += tlb-v4wbi.o # reuse v4wbi TLB functions
obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
@@ -72,4 +74,5 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
+obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
new file mode 100644
index 00000000000..20eec4ba173
--- /dev/null
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -0,0 +1,318 @@
+/*
+ * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * References:
+ * - Unified Layer 2 Cache for Feroceon CPU Cores,
+ * Document ID MV-S104858-00, Rev. A, October 23 2007.
+ */
+
+#include <linux/init.h>
+#include <asm/cacheflush.h>
+#include <asm/plat-orion/cache-feroceon-l2.h>
+
+
+/*
+ * Low-level cache maintenance operations.
+ *
+ * As well as the regular 'clean/invalidate/flush L2 cache line by
+ * MVA' instructions, the Feroceon L2 cache controller also features
+ * 'clean/invalidate L2 range by MVA' operations.
+ *
+ * Cache range operations are initiated by writing the start and
+ * end addresses to successive cp15 registers, and process every
+ * cache line whose first byte address lies in the inclusive range
+ * [start:end].
+ *
+ * The cache range operations stall the CPU pipeline until completion.
+ *
+ * The range operations require two successive cp15 writes, in
+ * between which we don't want to be preempted.
+ */
+static inline void l2_clean_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
+}
+
+static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /*
+ * Make sure 'start' and 'end' reference the same page, as
+ * L2 is PIPT and range operations only do a TLB lookup on
+ * the start address.
+ */
+ BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+
+ raw_local_irq_save(flags);
+ __asm__("mcr p15, 1, %0, c15, c9, 4" : : "r" (start));
+ __asm__("mcr p15, 1, %0, c15, c9, 5" : : "r" (end));
+ raw_local_irq_restore(flags);
+}
+
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
+{
+ l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
+}
+
+static inline void l2_clean_inv_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
+}
+
+static inline void l2_inv_pa(unsigned long addr)
+{
+ __asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
+}
+
+static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /*
+ * Make sure 'start' and 'end' reference the same page, as
+ * L2 is PIPT and range operations only do a TLB lookup on
+ * the start address.
+ */
+ BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+
+ raw_local_irq_save(flags);
+ __asm__("mcr p15, 1, %0, c15, c11, 4" : : "r" (start));
+ __asm__("mcr p15, 1, %0, c15, c11, 5" : : "r" (end));
+ raw_local_irq_restore(flags);
+}
+
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
+{
+ l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
+}
+
+
+/*
+ * Linux primitives.
+ *
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive, while the hardware cache range operations use
+ * inclusive start and end addresses.
+ */
+#define CACHE_LINE_SIZE 32
+#define MAX_RANGE_SIZE 1024
+
+static int l2_wt_override;
+
+static unsigned long calc_range_end(unsigned long start, unsigned long end)
+{
+ unsigned long range_end;
+
+ BUG_ON(start & (CACHE_LINE_SIZE - 1));
+ BUG_ON(end & (CACHE_LINE_SIZE - 1));
+
+ /*
+ * Try to process all cache lines between 'start' and 'end'.
+ */
+ range_end = end;
+
+ /*
+ * Limit the number of cache lines processed at once,
+ * since cache range operations stall the CPU pipeline
+ * until completion.
+ */
+ if (range_end > start + MAX_RANGE_SIZE)
+ range_end = start + MAX_RANGE_SIZE;
+
+ /*
+ * Cache range operations can't straddle a page boundary.
+ */
+ if (range_end > (start | (PAGE_SIZE - 1)) + 1)
+ range_end = (start | (PAGE_SIZE - 1)) + 1;
+
+ return range_end;
+}
+
+static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
+{
+ /*
+ * Clean and invalidate partial first cache line.
+ */
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+ start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+ }
+
+ /*
+ * Clean and invalidate partial last cache line.
+ */
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+ end &= ~(CACHE_LINE_SIZE - 1);
+ }
+
+ /*
+ * Invalidate all full cache lines between 'start' and 'end'.
+ */
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+
+ dsb();
+}
+
+static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
+{
+ /*
+ * If L2 is forced to WT, the L2 will always be clean and we
+ * don't need to do anything here.
+ */
+ if (!l2_wt_override) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+ }
+
+ dsb();
+}
+
+static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
+{
+ start &= ~(CACHE_LINE_SIZE - 1);
+ end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
+ while (start != end) {
+ unsigned long range_end = calc_range_end(start, end);
+ if (!l2_wt_override)
+ l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
+ l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
+ start = range_end;
+ }
+
+ dsb();
+}
+
+
+/*
+ * Routines to disable and re-enable the D-cache and I-cache at run
+ * time. These are necessary because the L2 cache can only be enabled
+ * or disabled while the L1 Dcache and Icache are both disabled.
+ */
+static void __init invalidate_and_disable_dcache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (cr & CR_C) {
+ unsigned long flags;
+
+ raw_local_irq_save(flags);
+ flush_cache_all();
+ set_cr(cr & ~CR_C);
+ raw_local_irq_restore(flags);
+ }
+}
+
+static void __init enable_dcache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (!(cr & CR_C))
+ set_cr(cr | CR_C);
+}
+
+static void __init __invalidate_icache(void)
+{
+ int dummy;
+
+ __asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0\n" : "=r" (dummy));
+}
+
+static void __init invalidate_and_disable_icache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (cr & CR_I) {
+ set_cr(cr & ~CR_I);
+ __invalidate_icache();
+ }
+}
+
+static void __init enable_icache(void)
+{
+ u32 cr;
+
+ cr = get_cr();
+ if (!(cr & CR_I))
+ set_cr(cr | CR_I);
+}
+
+static inline u32 read_extra_features(void)
+{
+ u32 u;
+
+ __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
+
+ return u;
+}
+
+static inline void write_extra_features(u32 u)
+{
+ __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
+}
+
+static void __init disable_l2_prefetch(void)
+{
+ u32 u;
+
+ /*
+ * Read the CPU Extra Features register and verify that the
+ * Disable L2 Prefetch bit is set.
+ */
+ u = read_extra_features();
+ if (!(u & 0x01000000)) {
+ printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+ write_extra_features(u | 0x01000000);
+ }
+}
+
+static void __init enable_l2(void)
+{
+ u32 u;
+
+ u = read_extra_features();
+ if (!(u & 0x00400000)) {
+ printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+
+ invalidate_and_disable_dcache();
+ invalidate_and_disable_icache();
+ write_extra_features(u | 0x00400000);
+ enable_icache();
+ enable_dcache();
+ }
+}
+
+void __init feroceon_l2_init(int __l2_wt_override)
+{
+ l2_wt_override = __l2_wt_override;
+
+ disable_l2_prefetch();
+
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
+
+ enable_l2();
+
+ printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+ l2_wt_override ? ", in WT override mode" : "");
+}
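
The new file's only public entry point is feroceon_l2_init(), which disables L2 prefetch, installs the three outer_cache range hooks and then enables the L2 with the L1 caches temporarily off. Below is a minimal usage sketch, assuming a hypothetical Kirkwood/MV78xx0 board file; only feroceon_l2_init() and the header path are taken from the patch itself.

/*
 * Minimal usage sketch (assumed platform code, not part of this diff).
 * After this call, outer-cache maintenance for non-coherent DMA can
 * reach the L2 through the inv/clean/flush_range hooks installed above.
 */
#include <asm/plat-orion/cache-feroceon-l2.h>

static void __init example_board_l2_setup(void)	/* hypothetical */
{
	/* Pass non-zero to run the L2 in write-through override mode. */
	feroceon_l2_init(0);
}
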
diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
new file mode 100644
index 00000000000..7eb0d320d24
--- /dev/null
+++ b/arch/arm/mm/copypage-feroceon.S
@@ -0,0 +1,95 @@
+/*
+ * linux/arch/arm/lib/copypage-feroceon.S
+ *
+ * Copyright (C) 2008 Marvell Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles copy_user_page and clear_user_page on Feroceon
+ * more optimally than the generic implementations.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/asm-offsets.h>
+
+ .text
+ .align 5
+
+ENTRY(feroceon_copy_user_page)
+ stmfd sp!, {r4-r9, lr}
+ mov ip, #PAGE_SZ
+1: mov lr, r1
+ ldmia r1!, {r2 - r9}
+ pld [lr, #32]
+ pld [lr, #64]
+ pld [lr, #96]
+ pld [lr, #128]
+ pld [lr, #160]
+ pld [lr, #192]
+ pld [lr, #224]
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ ldmia r1!, {r2 - r9}
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ stmia r0, {r2 - r9}
+ subs ip, ip, #(32 * 8)
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ bne 1b
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ ldmfd sp!, {r4-r9, pc}
+
+ .align 5
+
+ENTRY(feroceon_clear_user_page)
+ stmfd sp!, {r4-r7, lr}
+ mov r1, #PAGE_SZ/32
+ mov r2, #0
+ mov r3, #0
+ mov r4, #0
+ mov r5, #0
+ mov r6, #0
+ mov r7, #0
+ mov ip, #0
+ mov lr, #0
+1: stmia r0, {r2-r7, ip, lr}
+ subs r1, r1, #1
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
+ add r0, r0, #32
+ bne 1b
+ mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ ldmfd sp!, {r4-r7, pc}
+
+ __INITDATA
+
+ .type feroceon_user_fns, #object
+ENTRY(feroceon_user_fns)
+ .long feroceon_clear_user_page
+ .long feroceon_copy_user_page
+ .size feroceon_user_fns, . - feroceon_user_fns
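
feroceon_copy_user_page streams the page in 32-byte cache-line chunks, issuing pld prefetches well ahead of the loads and cleaning+invalidating each destination D line as soon as it has been written, then draining the write buffer. The C sketch below is a readability aid only, re-expressing that strategy with the same cp15 encodings the assembly uses; the helper names are made up and the real routine has to stay in assembly.

/*
 * Readability-only C rendering of the copy strategy above; helper names
 * are hypothetical, cp15 encodings are the ones used in the assembly.
 */
#include <linux/string.h>
#include <asm/page.h>

#define FEROCEON_CACHE_LINE	32

static inline void clean_inv_dline(void *p)	/* mcr p15, 0, .., c7, c14, 1 */
{
	__asm__("mcr p15, 0, %0, c7, c14, 1" : : "r" (p));
}

static inline void drain_wb(void)		/* mcr p15, 0, .., c7, c10, 4 */
{
	__asm__("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
}

static void feroceon_copy_page_sketch(void *to, const void *from)
{
	unsigned int off;

	for (off = 0; off < PAGE_SIZE; off += FEROCEON_CACHE_LINE) {
		/* ldmia/stmia in the assembly: one full D line per pass. */
		memcpy((char *)to + off, (const char *)from + off,
		       FEROCEON_CACHE_LINE);
		/* Push the freshly written line out of L1 straight away. */
		clean_inv_dline((char *)to + off);
	}
	drain_wb();
}
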
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 44558d5f931..fbfa2605844 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -144,13 +144,17 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
page = pfn_to_page(pfn);
mapping = page_mapping(page);
if (mapping) {
+#ifndef CONFIG_SMP
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty)
__flush_dcache_page(mapping, page);
+#endif
if (cache_is_vivt())
make_coherent(mapping, vma, addr, pfn);
+ else if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
}
}
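
With the PG_dcache_dirty handling compiled out on SMP, the new branch instead invalidates the whole I-cache whenever a page is mapped into an executable VMA on a non-VIVT cache. For reference only, the sketch below shows the single cp15 operation that __flush_icache_all() comes down to; the real definition lives in asm/cacheflush.h, not in this diff, and the same encoding appears in the assembly hunks later on as "invalidate I cache".

/*
 * Sketch only: __flush_icache_all() is essentially one "invalidate
 * entire I-cache" cp15 write (the real macro is in asm/cacheflush.h).
 */
static inline void flush_icache_all_sketch(void)
{
	__asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}
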
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 9df507d36e0..029ee65fda2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -199,6 +199,8 @@ void flush_dcache_page(struct page *page)
__flush_dcache_page(mapping, page);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, page);
+ else if (mapping)
+ __flush_icache_all();
}
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ec00f26bffa..b657f1719af 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -48,8 +48,6 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-
for_each_online_node(node) {
pg_data_t *n = NODE_DATA(node);
struct page *map = n->node_mem_map - n->node_start_pfn;
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 62066f3020c..7429f8c0101 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -26,8 +26,8 @@ EXPORT_SYMBOL(ioport_unmap);
#ifdef CONFIG_PCI
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
- unsigned long start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
if (!len || !start)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d41a75ed3dc..2d6d682c206 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -35,6 +35,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
/*
* The pmd table for the upper-most set of pages.
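
empty_zero_page gains an EXPORT_SYMBOL because, on ARM, ZERO_PAGE() resolves to the page backing empty_zero_page, so modular code that hands out the shared zero page needs the symbol at load time. A small hedged sketch of such a user follows; the function is invented and the rationale is an inference, not stated in the patch.

/*
 * Illustrative sketch, not from this patch: a module returning the
 * shared zero page pulls in empty_zero_page via ZERO_PAGE(), which is
 * why the export above is needed. Function name is hypothetical.
 */
#include <linux/mm.h>

static struct page *example_zero_page(void)
{
	return ZERO_PAGE(0);
}
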
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 700c04d6996..5673f4d6113 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -471,6 +471,7 @@ arm1020_crval:
.type arm1020_processor_functions, #object
arm1020_processor_functions:
.word v4t_early_abort
+ .word pabort_noifar
.word cpu_arm1020_proc_init
.word cpu_arm1020_proc_fin
.word cpu_arm1020_reset
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 1cc206ab5ea..4343fdb0e9e 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -452,6 +452,7 @@ arm1020e_crval:
.type arm1020e_processor_functions, #object
arm1020e_processor_functions:
.word v4t_early_abort
+ .word pabort_noifar
.word cpu_arm1020e_proc_init
.word cpu_arm1020e_proc_fin
.word cpu_arm1020e_reset
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index aff0ea08e2f..2a4ea1659e9 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -435,6 +435,7 @@ arm1022_crval:
.type arm1022_processor_functions, #object
arm1022_processor_functions:
.word v4t_early_abort
+ .word pabort_noifar
.word cpu_arm1022_proc_init
.word cpu_arm1022_proc_fin
.word cpu_arm1022_reset
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 65e43a10908..77a1babd421 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -430,6 +430,7 @@ arm1026_crval:
.type arm1026_processor_functions, #object
arm1026_processor_functions:
.word v5t_early_abort
+ .word pabort_noifar
.word cpu_arm1026_proc_init
.word cpu_arm1026_proc_fin
.word cpu_arm1026_reset
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 123a7dc7a43..c371fc87776 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -293,6 +293,7 @@ __arm7_setup: mov r0, #0
.type arm6_processor_functions, #object
ENTRY(arm6_processor_functions)
.word cpu_arm6_data_abort
+ .word pabort_noifar
.word cpu_arm6_proc_init
.word cpu_arm6_proc_fin
.word cpu_arm6_reset
@@ -309,6 +310,7 @@ ENTRY(arm6_processor_functions)
.type arm7_processor_functions, #object
ENTRY(arm7_processor_functions)
.word cpu_arm7_data_abort
+ .word pabort_noifar
.word cpu_arm7_proc_init
.word cpu_arm7_proc_fin
.word cpu_arm7_reset
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index dc763be4336..d64f8e6f75a 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -198,6 +198,7 @@ arm720_crval:
.type arm720_processor_functions, #object
ENTRY(arm720_processor_functions)
.word v4t_late_abort
+ .word pabort_noifar
.word cpu_arm720_proc_init
.word cpu_arm720_proc_fin
.word cpu_arm720_reset
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 7069f495cf9..3a57376c8bc 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -126,6 +126,7 @@ __arm740_setup:
.type arm740_processor_functions, #object
ENTRY(arm740_processor_functions)
.word v4t_late_abort
+ .word pabort_noifar
.word cpu_arm740_proc_init
.word cpu_arm740_proc_fin
.word cpu_arm740_reset
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index d091c257182..7b3ecdeb537 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -64,6 +64,7 @@ __arm7tdmi_setup:
.type arm7tdmi_processor_functions, #object
ENTRY(arm7tdmi_processor_functions)
.word v4t_late_abort
+ .word pabort_noifar
.word cpu_arm7tdmi_proc_init
.word cpu_arm7tdmi_proc_fin
.word cpu_arm7tdmi_reset
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 75c945ed6c4..28cdb060df4 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -417,6 +417,7 @@ arm920_crval:
.type arm920_processor_functions, #object
arm920_processor_functions:
.word v4t_early_abort
+ .word pabort_noifar
.word cpu_arm920_proc_init
.word cpu_arm920_proc_fin
.word cpu_arm920_reset
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index ffb751b877f..94ddcb4a4b7 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -421,6 +421,7 @@ arm922_crval:
.type arm922_processor_functions, #object
arm922_processor_functions:
.word v4t_early_abort
+ .word pabort_noifar
.word cpu_arm922_proc_init
.word cpu_arm922_proc_fin
.word cpu_arm922_reset
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 44c2c997819..d045812f339 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -332,7 +332,7 @@ ENTRY(arm925_dma_flush_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
#else
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -484,6 +484,7 @@ arm925_crval:
.type arm925_processor_functions, #object
arm925_processor_functions:
.word v4t_early_abort
+ .word pabort_noifar
.word cpu_arm925_proc_init
.word cpu_arm925_proc_fin
.word cpu_arm925_reset
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 194ef48968e..4cd33169a7c 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -295,7 +295,7 @@ ENTRY(arm926_dma_flush_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
#else
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -437,6 +437,7 @@ arm926_crval:
.type arm926_processor_functions, #object
arm926_processor_functions:
.word v5tj_early_abort
+ .word pabort_noifar
.word cpu_arm926_proc_init
.word cpu_arm926_proc_fin
.word cpu_arm926_reset
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 786c593778f..1a3d63df8e9 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -222,7 +222,7 @@ ENTRY(arm940_dma_flush_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r3, c7, c14, 2 @ clean/flush D entry
#else
- mcr p15, 0, r3, c7, c10, 2 @ clean D entry
+ mcr p15, 0, r3, c7, c6, 2 @ invalidate D entry
#endif
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
@@ -321,6 +321,7 @@ __arm940_setup:
.type arm940_processor_functions, #object
ENTRY(arm940_processor_functions)
.word nommu_early_abort
+ .word pabort_noifar
.word cpu_arm940_proc_init
.word cpu_arm940_proc_fin
.word cpu_arm940_reset
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index a60c1421d45..82d579ac9b9 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -265,7 +265,7 @@ ENTRY(arm946_dma_flush_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
#else
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -376,6 +376,7 @@ __arm946_setup:
.type arm946_processor_functions, #object
ENTRY(arm946_processor_functions)
.word nommu_early_abort
+ .word pabort_noifar
.word cpu_arm946_proc_init
.word cpu_arm946_proc_fin
.word cpu_arm946_reset
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 4848eeac86b..c85c1f50e39 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -64,6 +64,7 @@ __arm9tdmi_setup:
.type arm9tdmi_processor_functions, #object
ENTRY(arm9tdmi_processor_functions)
.word nommu_early_abort
+ .word pabort_noifar
.word cpu_arm9tdmi_proc_init
.word cpu_arm9tdmi_proc_fin
.word cpu_arm9tdmi_reset
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index fa0dc7e6f0e..f2e5884c513 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -44,11 +44,31 @@
*/
#define CACHE_DLINESIZE 32
+ .bss
+ .align 3
+__cache_params_loc:
+ .space 8
+
.text
+__cache_params:
+ .word __cache_params_loc
+
/*
* cpu_feroceon_proc_init()
*/
ENTRY(cpu_feroceon_proc_init)
+ mrc p15, 0, r0, c0, c0, 1 @ read cache type register
+ ldr r1, __cache_params
+ mov r2, #(16 << 5)
+ tst r0, #(1 << 16) @ get way
+ mov r0, r0, lsr #18 @ get cache size order
+ movne r3, #((4 - 1) << 30) @ 4-way
+ and r0, r0, #0xf
+ moveq r3, #0 @ 1-way
+ mov r2, r2, lsl r0 @ actual cache size
+ movne r2, r2, lsr #2 @ turned into # of sets
+ sub r2, r2, #(1 << 5)
+ stmia r1, {r2, r3}
mov pc, lr
/*
@@ -59,6 +79,13 @@ ENTRY(cpu_feroceon_proc_fin)
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl feroceon_flush_kern_cache_all
+
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+ mov r0, #0
+ mcr p15, 1, r0, c15, c9, 0 @ clean L2
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+#endif
+
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
@@ -93,7 +120,7 @@ ENTRY(cpu_feroceon_reset)
*
* Called with IRQs disabled
*/
- .align 10
+ .align 5
ENTRY(cpu_feroceon_do_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
@@ -106,6 +133,7 @@ ENTRY(cpu_feroceon_do_idle)
* Clean and invalidate all cache entries in a particular
* address space.
*/
+ .align 5
ENTRY(feroceon_flush_user_cache_all)
/* FALLTHROUGH */
@@ -116,15 +144,19 @@ ENTRY(feroceon_flush_user_cache_all)
*/
ENTRY(feroceon_flush_kern_cache_all)
mov r2, #VM_EXEC
- mov ip, #0
+
__flush_whole_cache:
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
-#else
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
- bne 1b
-#endif
+ ldr r1, __cache_params
+ ldmia r1, {r1, r3}
+1: orr ip, r1, r3
+2: mcr p15, 0, ip, c7, c14, 2 @ clean + invalidate D set/way
+ subs ip, ip, #(1 << 30) @ next way
+ bcs 2b
+ subs r1, r1, #(1 << 5) @ next set
+ bcs 1b
+
tst r2, #VM_EXEC
+ mov ip, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
@@ -139,30 +171,22 @@ __flush_whole_cache:
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
+ .align 5
ENTRY(feroceon_flush_user_cache_range)
- mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
1: tst r2, #VM_EXEC
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #CACHE_DLINESIZE
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
- mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
- add r0, r0, #CACHE_DLINESIZE
-#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
-#endif
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
+ mov ip, #0
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
@@ -176,6 +200,7 @@ ENTRY(feroceon_flush_user_cache_range)
* - start - virtual start address
* - end - virtual end address
*/
+ .align 5
ENTRY(feroceon_coherent_kern_range)
/* FALLTHROUGH */
@@ -207,6 +232,7 @@ ENTRY(feroceon_coherent_user_range)
*
* - addr - page aligned address
*/
+ .align 5
ENTRY(feroceon_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
@@ -218,6 +244,20 @@ ENTRY(feroceon_flush_kern_dcache_page)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_flush_kern_dcache_page)
+ mrs r2, cpsr
+ add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
+ mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
+ msr cpsr_c, r2 @ restore interrupts
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
/*
* dma_inv_range(start, end)
*
@@ -231,14 +271,13 @@ ENTRY(feroceon_flush_kern_dcache_page)
*
* (same as v4wb)
*/
+ .align 5
ENTRY(feroceon_dma_inv_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
+ bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
-#endif
- bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -246,6 +285,22 @@ ENTRY(feroceon_dma_inv_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_dma_inv_range)
+ mrs r2, cpsr
+ tst r0, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
+ tst r1, #CACHE_DLINESIZE - 1
+ mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
+ cmp r1, r0
+ subne r1, r1, #1 @ top address is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c14, 0 @ D inv range start
+ mcr p15, 5, r1, c15, c14, 1 @ D inv range top
+ msr cpsr_c, r2 @ restore interrupts
+ mov pc, lr
+
/*
* dma_clean_range(start, end)
*
@@ -256,14 +311,26 @@ ENTRY(feroceon_dma_inv_range)
*
* (same as v4wb)
*/
+ .align 5
ENTRY(feroceon_dma_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
-#endif
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+ .align 5
+ENTRY(feroceon_range_dma_clean_range)
+ mrs r2, cpsr
+ cmp r1, r0
+ subne r1, r1, #1 @ top address is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c13, 0 @ D clean range start
+ mcr p15, 5, r1, c15, c13, 1 @ D clean range top
+ msr cpsr_c, r2 @ restore interrupts
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
@@ -275,20 +342,29 @@ ENTRY(feroceon_dma_clean_range)
* - start - virtual start address
* - end - virtual end address
*/
+ .align 5
ENTRY(feroceon_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
-1:
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
-#else
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry
-#endif
+1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+ .align 5
+ENTRY(feroceon_range_dma_flush_range)
+ mrs r2, cpsr
+ cmp r1, r0
+ subne r1, r1, #1 @ top address is inclusive
+ orr r3, r2, #PSR_I_BIT
+ msr cpsr_c, r3 @ disable interrupts
+ mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
+ mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
+ msr cpsr_c, r2 @ restore interrupts
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
ENTRY(feroceon_cache_fns)
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
@@ -300,12 +376,32 @@ ENTRY(feroceon_cache_fns)
.long feroceon_dma_clean_range
.long feroceon_dma_flush_range
+ENTRY(feroceon_range_cache_fns)
+ .long feroceon_flush_kern_cache_all
+ .long feroceon_flush_user_cache_all
+ .long feroceon_flush_user_cache_range
+ .long feroceon_coherent_kern_range
+ .long feroceon_coherent_user_range
+ .long feroceon_range_flush_kern_dcache_page
+ .long feroceon_range_dma_inv_range
+ .long feroceon_range_dma_clean_range
+ .long feroceon_range_dma_flush_range
+
+ .align 5
ENTRY(cpu_feroceon_dcache_clean_area)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+ mov r2, r0
+ mov r3, r1
+#endif
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+1: mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry
+ add r2, r2, #CACHE_DLINESIZE
+ subs r3, r3, #CACHE_DLINESIZE
+ bhi 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
@@ -322,20 +418,25 @@ ENTRY(cpu_feroceon_dcache_clean_area)
.align 5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
- mov ip, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
-#else
-@ && 'Clean & Invalidate whole DCache'
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
- bne 1b
-#endif
- mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
- mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ /*
+ * Note: we wish to call __flush_whole_cache but we need to preserve
+ * lr to do so. The only way without touching main memory is to
+ * use r2 which is normally used to test the VM_EXEC flag, and
+ * compensate locally for the skipped ops if it is not set.
+ */
+ mov r2, lr @ abuse r2 to preserve lr
+ bl __flush_whole_cache
+ @ if r2 contains the VM_EXEC bit then the next 2 ops are done already
+ tst r2, #VM_EXEC
+ mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcreq p15, 0, ip, c7, c10, 4 @ drain WB
+
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
-#endif
+ mov pc, r2
+#else
mov pc, lr
+#endif
/*
* cpu_feroceon_set_pte_ext(ptep, pte, ext)
@@ -362,15 +463,11 @@ ENTRY(cpu_feroceon_set_pte_ext)
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- eor r3, r2, #0x0a @ C & small page?
- tst r3, #0x0b
- biceq r2, r2, #4
-#endif
str r2, [r0] @ hardware version
mov r0, r0
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH)
+ mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
@@ -387,32 +484,24 @@ __feroceon_setup:
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
- mov r0, #4 @ disable write-back on caches explicitly
- mcr p15, 7, r0, c15, c0, 0
-#endif
-
adr r5, feroceon_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
-#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
- orr r0, r0, #0x4000 @ .1.. .... .... ....
-#endif
mov pc, lr
.size __feroceon_setup, . - __feroceon_setup
/*
- * R
- * .RVI ZFRS BLDP WCAM
- * .011 0001 ..11 0101
+ * B
+ * R P
+ * .RVI UFRS BLDP WCAM
+ * .011 .001 ..11 0101
*
*/
.type feroceon_crval, #object
feroceon_crval:
- crval clear=0x00007f3f, mmuset=0x00003135, ucset=0x00001134
+ crval clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
__INITDATA
@@ -423,6 +512,7 @@ feroceon_crval:
.type feroceon_processor_functions, #object
feroceon_processor_functions:
.word v5t_early_abort
+ .word pabort_noifar
.word cpu_feroceon_proc_init
.word cpu_feroceon_proc_fin
.word cpu_feroceon_reset
@@ -449,6 +539,21 @@ cpu_feroceon_name:
.asciz "Feroceon"
.size cpu_feroceon_name, . - cpu_feroceon_name
+ .type cpu_88fr531_name, #object
+cpu_88fr531_name:
+ .asciz "Feroceon 88FR531-vd"
+ .size cpu_88fr531_name, . - cpu_88fr531_name
+
+ .type cpu_88fr571_name, #object
+cpu_88fr571_name:
+ .asciz "Feroceon 88FR571-vd"
+ .size cpu_88fr571_name, . - cpu_88fr571_name
+
+ .type cpu_88fr131_name, #object
+cpu_88fr131_name:
+ .asciz "Feroceon 88FR131"
+ .size cpu_88fr131_name, . - cpu_88fr131_name
+
.align
.section ".proc.info.init", #alloc, #execinstr
@@ -456,15 +561,15 @@ cpu_feroceon_name:
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
.type __feroceon_old_id_proc_info,#object
__feroceon_old_id_proc_info:
- .long 0x41069260
- .long 0xfffffff0
- .long PMD_TYPE_SECT | \
+ .long 0x41009260
+ .long 0xff00fff0
+ .long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
@@ -475,22 +580,22 @@ __feroceon_old_id_proc_info:
.long cpu_feroceon_name
.long feroceon_processor_functions
.long v4wbi_tlb_fns
- .long v4wb_user_fns
+ .long feroceon_user_fns
.long feroceon_cache_fns
.size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info
#endif
- .type __feroceon_proc_info,#object
-__feroceon_proc_info:
+ .type __88fr531_proc_info,#object
+__88fr531_proc_info:
.long 0x56055310
.long 0xfffffff0
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- .long PMD_TYPE_SECT | \
+ .long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
@@ -498,9 +603,59 @@ __feroceon_proc_info:
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
- .long cpu_feroceon_name
+ .long cpu_88fr531_name
.long feroceon_processor_functions
.long v4wbi_tlb_fns
- .long v4wb_user_fns
+ .long feroceon_user_fns
.long feroceon_cache_fns
- .size __feroceon_proc_info, . - __feroceon_proc_info
+ .size __88fr531_proc_info, . - __88fr531_proc_info
+
+ .type __88fr571_proc_info,#object
+__88fr571_proc_info:
+ .long 0x56155710
+ .long 0xfffffff0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __feroceon_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_88fr571_name
+ .long feroceon_processor_functions
+ .long v4wbi_tlb_fns
+ .long feroceon_user_fns
+ .long feroceon_range_cache_fns
+ .size __88fr571_proc_info, . - __88fr571_proc_info
+
+ .type __88fr131_proc_info,#object
+__88fr131_proc_info:
+ .long 0x56251310
+ .long 0xfffffff0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_BIT4 | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __feroceon_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_88fr131_name
+ .long feroceon_processor_functions
+ .long v4wbi_tlb_fns
+ .long feroceon_user_fns
+ .long feroceon_range_cache_fns
+ .size __88fr131_proc_info, . - __88fr131_proc_info
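
The feroceon_range_* variants added above replace the per-line loops with the CPU's "range" cache operations: an inclusive [start, top] pair written as two back-to-back cp15 instructions, with IRQs masked so the pair cannot be split. Purely as an illustration of that pattern, here is a C sketch using the same encodings the assembly issues for "D inv range"; it assumes end > start and is not code from this patch.

/*
 * C sketch of the range-operation pattern used by feroceon_range_*
 * above (illustrative only). The top address is inclusive, and the two
 * cp15 writes must not be separated by an interrupt.
 */
#include <linux/irqflags.h>

static inline void l1_d_inv_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	__asm__("mcr p15, 5, %0, c15, c14, 0" : : "r" (start));	/* range start */
	__asm__("mcr p15, 5, %0, c15, c14, 1" : : "r" (end - 1));	/* inclusive top */
	raw_local_irq_restore(flags);
}
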
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 6e226e12989..9818195dbf1 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -216,6 +216,7 @@ sa110_crval:
.type sa110_processor_functions, #object
ENTRY(sa110_processor_functions)
.word v4_early_abort
+ .word pabort_noifar
.word cpu_sa110_proc_init
.word cpu_sa110_proc_fin
.word cpu_sa110_reset
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 9afb11d089f..c5fe27ad289 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -231,6 +231,7 @@ sa1100_crval:
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
+ .word pabort_noifar
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index eb42e5b9486..5702ec58b2a 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -17,10 +17,6 @@
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
-#ifdef CONFIG_SMP
-#include <asm/hardware/arm_scu.h>
-#endif
-
#include "proc-macros.S"
#define D_CACHE_LINE_SIZE 32
@@ -187,20 +183,10 @@ cpu_v6_name:
*/
__v6_setup:
#ifdef CONFIG_SMP
- /* Set up the SCU on core 0 only */
- mrc p15, 0, r0, c0, c0, 5 @ CPU core number
- ands r0, r0, #15
- ldreq r0, =SCU_BASE
- ldreq r5, [r0, #SCU_CTRL]
- orreq r5, r5, #1
- streq r5, [r0, #SCU_CTRL]
-
-#ifndef CONFIG_CPU_DCACHE_DISABLE
mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
orr r0, r0, #0x20
mcr p15, 0, r0, c1, c0, 1
#endif
-#endif
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
@@ -233,6 +219,7 @@ v6_crval:
.type v6_processor_functions, #object
ENTRY(v6_processor_functions)
.word v6_early_abort
+ .word pabort_noifar
.word cpu_v6_proc_init
.word cpu_v6_proc_fin
.word cpu_v6_reset
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index e0acc5ae6f6..b49f9a4c82c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -205,6 +205,7 @@ __v7_setup_stack:
.type v7_processor_functions, #object
ENTRY(v7_processor_functions)
.word v7_early_abort
+ .word pabort_ifar
.word cpu_v7_proc_init
.word cpu_v7_proc_fin
.word cpu_v7_reset
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index d95921a2ab9..3533741a76f 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -450,6 +450,7 @@ xsc3_crval:
.type xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
.word v5t_early_abort
+ .word pabort_noifar
.word cpu_xsc3_proc_init
.word cpu_xsc3_proc_fin
.word cpu_xsc3_reset
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 016690b9d56..2dd85273976 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -527,6 +527,7 @@ xscale_crval:
.type xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
.word v5t_early_abort
+ .word pabort_noifar
.word cpu_xscale_proc_init
.word cpu_xscale_proc_fin
.word cpu_xscale_reset