From 0ba8b9b273c45dd23f60ff700e265a0069b33758 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sun, 10 Aug 2008 18:08:10 +0100
Subject: [ARM] cputype: separate definitions, use them

Add asm/cputype.h, moving functions and definitions from asm/system.h
there.  Convert all users of 'processor_id' to the more efficient
read_cpuid_id() function.

Signed-off-by: Russell King
---
 arch/arm/include/asm/cputype.h | 64 ++++++++++++++++++++++++++++++++++++++++++
 arch/arm/include/asm/system.h  | 58 --------------------------------------
 2 files changed, 64 insertions(+), 58 deletions(-)
 create mode 100644 arch/arm/include/asm/cputype.h

diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
new file mode 100644
index 00000000000..7b9d27e749b
--- /dev/null
+++ b/arch/arm/include/asm/cputype.h
@@ -0,0 +1,64 @@
+#ifndef __ASM_ARM_CPUTYPE_H
+#define __ASM_ARM_CPUTYPE_H
+
+#include <linux/stringify.h>
+
+#define CPUID_ID	0
+#define CPUID_CACHETYPE	1
+#define CPUID_TCM	2
+#define CPUID_TLBTYPE	3
+
+#ifdef CONFIG_CPU_CP15
+#define read_cpuid(reg)							\
+	({								\
+		unsigned int __val;					\
+		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
+		    : "=r" (__val)					\
+		    :							\
+		    : "cc");						\
+		__val;							\
+	})
+#else
+extern unsigned int processor_id;
+#define read_cpuid(reg) (processor_id)
+#endif
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant.  Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return read_cpuid(CPUID_ID);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+	return read_cpuid(CPUID_CACHETYPE);
+}
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA.  For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3()	0
+#else
+static inline int cpu_is_xsc3(void)
+{
+	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+		return 1;
+
+	return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define cpu_is_xscale()	0
+#else
+#define cpu_is_xscale()	1
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 514af792a59..7aad78420f1 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -43,11 +43,6 @@
 #define CR_XP	(1 << 23)	/* Extended page tables		*/
 #define CR_VE	(1 << 24)	/* Vectored interrupts		*/
 
-#define CPUID_ID	0
-#define CPUID_CACHETYPE	1
-#define CPUID_TCM	2
-#define CPUID_TLBTYPE	3
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -61,36 +56,8 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
-#include <linux/stringify.h>
 #include <linux/irqflags.h>
 
-#ifdef CONFIG_CPU_CP15
-#define read_cpuid(reg)							\
-	({								\
-		unsigned int __val;					\
-		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
-		    : "=r" (__val)					\
-		    :							\
-		    : "cc");						\
-		__val;							\
-	})
-#else
-extern unsigned int processor_id;
-#define read_cpuid(reg) (processor_id)
-#endif
-
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant.  Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */ -static inline unsigned int read_cpuid_id(void) __attribute_const__; - -static inline unsigned int read_cpuid_id(void) -{ - return read_cpuid(CPUID_ID); -} - #define __exception __attribute__((section(".exception.text"))) struct thread_info; @@ -131,31 +98,6 @@ extern void cpu_init(void); void arm_machine_restart(char mode); extern void (*arm_pm_restart)(char str); -/* - * Intel's XScale3 core supports some v6 features (supersections, L2) - * but advertises itself as v5 as it does not support the v6 ISA. For - * this reason, we need a way to explicitly test for this type of CPU. - */ -#ifndef CONFIG_CPU_XSC3 -#define cpu_is_xsc3() 0 -#else -static inline int cpu_is_xsc3(void) -{ - extern unsigned int processor_id; - - if ((processor_id & 0xffffe000) == 0x69056000) - return 1; - - return 0; -} -#endif - -#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) -#define cpu_is_xscale() 0 -#else -#define cpu_is_xscale() 1 -#endif - #define UDBG_UNDEFINED (1 << 0) #define UDBG_SYSCALL (1 << 1) #define UDBG_BADABORT (1 << 2) -- cgit v1.2.3 From 46097c7dd8bfaf9fb86565b6de45ab5a63afdd53 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 10 Aug 2008 18:10:19 +0100 Subject: [ARM] cachetype: move definitions to separate header Rather than pollute asm/cacheflush.h with the cache type definitions, move them to asm/cachetype.h, and include this new header where necessary. Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 90 ----------------------------------- arch/arm/include/asm/cachetype.h | 96 ++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/mmu_context.h | 1 + 3 files changed, 97 insertions(+), 90 deletions(-) create mode 100644 arch/arm/include/asm/cachetype.h (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 9073d9c6567..de6c59f814a 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -444,94 +444,4 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt, dmac_inv_range(start, start + size); } -#define __cacheid_present(val) (val != read_cpuid(CPUID_ID)) -#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29)) - -#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25)) -#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25)) -#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) -#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) - -#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val)) -#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val)) -#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val)) -#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val)) -#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? 
((val & (3 << 14)) == (1 << 14)) : 0) - -#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) -/* - * VIVT caches only - */ -#define cache_is_vivt() 1 -#define cache_is_vipt() 0 -#define cache_is_vipt_nonaliasing() 0 -#define cache_is_vipt_aliasing() 0 -#define icache_is_vivt_asid_tagged() 0 - -#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) -/* - * VIPT caches only - */ -#define cache_is_vivt() 0 -#define cache_is_vipt() 1 -#define cache_is_vipt_nonaliasing() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_vipt_nonaliasing(__val); \ - }) - -#define cache_is_vipt_aliasing() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_vipt_aliasing(__val); \ - }) - -#define icache_is_vivt_asid_tagged() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_vivt_asid_tagged_instr(__val); \ - }) - -#else -/* - * VIVT or VIPT caches. Note that this is unreliable since ARM926 - * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test. - * There's no way to tell from the CacheType register what type (!) - * the cache is. - */ -#define cache_is_vivt() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - (!__cacheid_present(__val)) || __cacheid_vivt(__val); \ - }) - -#define cache_is_vipt() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_present(__val) && __cacheid_vipt(__val); \ - }) - -#define cache_is_vipt_nonaliasing() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_present(__val) && \ - __cacheid_vipt_nonaliasing(__val); \ - }) - -#define cache_is_vipt_aliasing() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_present(__val) && \ - __cacheid_vipt_aliasing(__val); \ - }) - -#define icache_is_vivt_asid_tagged() \ - ({ \ - unsigned int __val = read_cpuid(CPUID_CACHETYPE); \ - __cacheid_present(__val) && \ - __cacheid_vivt_asid_tagged_instr(__val); \ - }) - -#endif - #endif diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h new file mode 100644 index 00000000000..b52386bfd50 --- /dev/null +++ b/arch/arm/include/asm/cachetype.h @@ -0,0 +1,96 @@ +#ifndef __ASM_ARM_CACHETYPE_H +#define __ASM_ARM_CACHETYPE_H + +#include + +#define __cacheid_present(val) (val != read_cpuid_id()) +#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29)) + +#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25)) +#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25)) +#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) +#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) + +#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val)) +#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val)) +#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val)) +#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val)) +#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? 
((val & (3 << 14)) == (1 << 14)) : 0) + +#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) +/* + * VIVT caches only + */ +#define cache_is_vivt() 1 +#define cache_is_vipt() 0 +#define cache_is_vipt_nonaliasing() 0 +#define cache_is_vipt_aliasing() 0 +#define icache_is_vivt_asid_tagged() 0 + +#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) +/* + * VIPT caches only + */ +#define cache_is_vivt() 0 +#define cache_is_vipt() 1 +#define cache_is_vipt_nonaliasing() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_vipt_nonaliasing(__val); \ + }) + +#define cache_is_vipt_aliasing() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_vipt_aliasing(__val); \ + }) + +#define icache_is_vivt_asid_tagged() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_vivt_asid_tagged_instr(__val); \ + }) + +#else +/* + * VIVT or VIPT caches. Note that this is unreliable since ARM926 + * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test. + * There's no way to tell from the CacheType register what type (!) + * the cache is. + */ +#define cache_is_vivt() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + (!__cacheid_present(__val)) || __cacheid_vivt(__val); \ + }) + +#define cache_is_vipt() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_present(__val) && __cacheid_vipt(__val); \ + }) + +#define cache_is_vipt_nonaliasing() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_present(__val) && \ + __cacheid_vipt_nonaliasing(__val); \ + }) + +#define cache_is_vipt_aliasing() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_present(__val) && \ + __cacheid_vipt_aliasing(__val); \ + }) + +#define icache_is_vivt_asid_tagged() \ + ({ \ + unsigned int __val = read_cpuid_cachetype(); \ + __cacheid_present(__val) && \ + __cacheid_vivt_asid_tagged_instr(__val); \ + }) + +#endif + +#endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index a301e446007..0559f37c2a2 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -15,6 +15,7 @@ #include #include +#include #include #include -- cgit v1.2.3 From 3305a60795442a22fe8e9f5fb93a6f1f8dea6bb2 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 19 Aug 2008 04:15:23 +0100 Subject: [ARM] 5206/1: remove kprobe_trap_handler() hack As mentioned in commit 796969104cab0d454dbc792ad0d12a4f365a8564, and because of commit b03a5b7559563dafdbe52f8b5d8e453a914db941, the direct calling of kprobe_trap_handler() can be removed. 
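Stepping back before this patch's tags and diff: the two header patches above boil down to bit tests against the CP15 ID registers. The following standalone program restates two of those tests (the XScale3 match from cpu_is_xsc3() and the v7 cache-type-format test from cachetype.h) so the arithmetic can be checked outside the kernel; both register values are invented for illustration, not dumped from real silicon.

#include <stdio.h>

/* same mask/value as cpu_is_xsc3() in asm/cputype.h */
static int is_xsc3(unsigned int midr)
{
	return (midr & 0xffffe000) == 0x69056000;
}

/* same test as __cacheid_type_v7() in asm/cachetype.h */
static int cacheid_is_v7_format(unsigned int ctr)
{
	return (ctr & (7u << 29)) == (4u << 29);
}

int main(void)
{
	unsigned int midr = 0x69056400;	/* hypothetical XScale3 main ID */
	unsigned int ctr  = 0x80048004;	/* hypothetical v7-format cache type */

	printf("xsc3: %d, v7 cache-type format: %d\n",
	       is_xsc3(midr), cacheid_is_v7_format(ctr));
	return 0;
}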
Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/include/asm/kprobes.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index a5d0d99ad38..bb8a19bd582 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -61,7 +61,6 @@ struct kprobe_ctlblk {
 void arch_remove_kprobe(struct kprobe *);
 void kretprobe_trampoline(void);
 
-int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
-- cgit v1.2.3

From e589ed23dd27b890900eb7514f0a9e297d1e02b5 Mon Sep 17 00:00:00 2001
From: Mikael Pettersson
Date: Wed, 20 Aug 2008 09:36:07 +0100
Subject: [ARM] 5218/1: arm: improved futex support

Linux/ARM currently doesn't support robust or PI futexes. The problem
is that the kernel wants to perform certain ops (cmpxchg, set, add,
or, andn, xor) atomically on user-space addresses, and ARM's futex.h
doesn't support that. This patch adds that support, but only for
uniprocessor machines. For UP it's enough to disable preemption to
ensure mutual exclusion with other software agents (futexes don't need
to care about other hardware agents, fortunately).

This patch is based on one posted by Khem Raj on 2007-08-01.
(That patch is included in the -RT kernel patches.)

My changes since that version include:
* corrected implementation of FUTEX_OP_ANDN (must complement oparg)
* added missing memory clobber to futex_atomic_cmpxchg_inatomic()
* removed spinlock because it's unnecessary for UP and insufficient
  for SMP, instead the code is restricted to UP and relies on the fact
  that pagefault_disable() also disables preemption
* coding style cleanups

Tested on ARMv5 XScales with the glibc-2.6 nptl test suite.
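Before the tags and the diff itself, a quick userspace model of how the futex_atomic_op_inuser() routine added below unpacks its encoded_op word. The op word here is constructed by hand purely for illustration; note that the left-then-right shifts mirror the kernel code and rely on arithmetic right shift of signed ints to sign-extend the two 12-bit argument fields.

#include <stdio.h>

int main(void)
{
	int encoded_op = (1 << 28) | (3 << 24) | (5 << 12) | 4;	/* made-up value */

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* bits 23..12, sign-extended */
	int cmparg = (encoded_op << 20) >> 20;	/* bits 11..0, sign-extended */

	/* prints: op=1 cmp=3 oparg=5 cmparg=4 */
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}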
Tested-by: Bruce Ashfield Signed-off-by: Mikael Pettersson Signed-off-by: Russell King --- arch/arm/include/asm/futex.h | 124 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 121 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 6a332a9f099..9ee743b95de 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -1,6 +1,124 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H +#ifndef _ASM_ARM_FUTEX_H +#define _ASM_ARM_FUTEX_H + +#ifdef __KERNEL__ + +#ifdef CONFIG_SMP #include -#endif +#else /* !SMP, we can work around lack of atomic ops by disabling preemption */ + +#include +#include +#include +#include + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile__( \ + "1: ldrt %1, [%2]\n" \ + " " insn "\n" \ + "2: strt %0, [%2]\n" \ + " mov %0, #0\n" \ + "3:\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .long 1b, 4f, 2b, 4f\n" \ + " .previous\n" \ + " .section .fixup,\"ax\"\n" \ + "4: mov %0, %4\n" \ + " b 3b\n" \ + " .previous" \ + : "=&r" (ret), "=&r" (oldval) \ + : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ + : "cc", "memory") + +static inline int +futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + pagefault_disable(); /* implies preempt_disable() */ + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); /* subsumes preempt_enable() */ + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +{ + int val; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + pagefault_disable(); /* implies preempt_disable() */ + + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" + "1: ldrt %0, [%3]\n" + " teq %0, %1\n" + "2: streqt %2, [%3]\n" + "3:\n" + " .section __ex_table,\"a\"\n" + " .align 3\n" + " .long 1b, 4f, 2b, 4f\n" + " .previous\n" + " .section .fixup,\"ax\"\n" + "4: mov %0, %4\n" + " b 3b\n" + " .previous" + : "=&r" (val) + : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) + : "cc", "memory"); + + pagefault_enable(); /* subsumes preempt_enable() */ + + return val; +} + +#endif /* !SMP */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_ARM_FUTEX_H */ -- cgit v1.2.3 From d81030a1badb4e4d08358ff2c2bda9b11d5a6559 Mon Sep 17 00:00:00 
2001 From: Nicolas Pitre Date: Thu, 21 Aug 2008 23:09:48 +0100 Subject: [ARM] 5211/2: fix a couple warnings from BUG() usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_DEBUG_BUGVERBOSE is not set, we get warnings such as: arch/arm/mm/ioremap.c: In function ‘remap_area_pte’: arch/arm/mm/ioremap.c:67: warning: control reaches end of non-void function mm/bootmem.c: In function ‘mark_bootmem’: mm/bootmem.c:321: warning: control reaches end of non-void function fs/dcache.c: In function ‘d_materialise_unique’: fs/dcache.c:1875: warning: control reaches end of non-void function fs/nfs/client.c: In function ‘nfs_sockaddr_match_ipaddr’: fs/nfs/client.c:251: warning: control reaches end of non-void function block/cfq-iosched.c: In function ‘cfq_async_queue_prio’: block/cfq-iosched.c:1501: warning: control reaches end of non-void function Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/bug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 7b62351f097..4d88425a416 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -12,7 +12,7 @@ extern void __bug(const char *file, int line) __attribute__((noreturn)); #else /* this just causes an oops */ -#define BUG() (*(int *)0 = 0) +#define BUG() do { *(int *)0 = 0; } while (1) #endif -- cgit v1.2.3 From 8d5796d2ec6b5a4e7a52861144e63af438d6f8f7 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 Aug 2008 21:03:32 +0100 Subject: [ARM] 5222/1: Allow configuring user:kernel split via Kconfig This patch adds a config option (CONFIG_VMSPLIT_*) to allow choosing between 3:1, 2:2 and 1:3 user:kernel memory splits. Tested-by: Riku Voipio Signed-off-by: Lennert Buytenhek Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index bf7c737c922..7e8d22fef29 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -13,43 +13,33 @@ #ifndef __ASM_ARM_MEMORY_H #define __ASM_ARM_MEMORY_H +#include +#include +#include +#include + /* * Allow for constants defined here to be used from assembly code * by prepending the UL suffix only with actual C code compilation. */ -#ifndef __ASSEMBLY__ -#define UL(x) (x##UL) -#else -#define UL(x) (x) -#endif - -#include -#include -#include +#define UL(x) _AC(x, UL) #ifdef CONFIG_MMU -#ifndef TASK_SIZE /* + * PAGE_OFFSET - the virtual address of the start of the kernel image * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ -#define TASK_SIZE UL(0xbf000000) -#define TASK_UNMAPPED_BASE UL(0x40000000) -#endif +#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) +#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) +#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) /* * The maximum size of a 26-bit user space task. */ #define TASK_SIZE_26 UL(0x04000000) -/* - * Page offset: 3GB - */ -#ifndef PAGE_OFFSET -#define PAGE_OFFSET UL(0xc0000000) -#endif - /* * The module space lives between the addresses given by TASK_SIZE * and PAGE_OFFSET - it must be within 32MB of the kernel text. 
-- cgit v1.2.3 From 4e6a0c397f40c9d98062aaaac66cab684f0b9186 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 29 Aug 2008 18:31:15 +0100 Subject: [ARM] 5230/1: Replace post-indexed LDRT/STRT in uaccess.h The post-index immediate value is optional if it is 0 and this patch removes it. The reason is to allow such instructions to compile to Thumb-2 where only pre-indexed LDRT/STRT instructions are allowed. Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index d0f51ff900b..e98ec60b340 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -225,7 +225,7 @@ do { \ #define __get_user_asm_byte(x,addr,err) \ __asm__ __volatile__( \ - "1: ldrbt %1,[%2],#0\n" \ + "1: ldrbt %1,[%2]\n" \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -261,7 +261,7 @@ do { \ #define __get_user_asm_word(x,addr,err) \ __asm__ __volatile__( \ - "1: ldrt %1,[%2],#0\n" \ + "1: ldrt %1,[%2]\n" \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -306,7 +306,7 @@ do { \ #define __put_user_asm_byte(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strbt %1,[%2],#0\n" \ + "1: strbt %1,[%2]\n" \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -339,7 +339,7 @@ do { \ #define __put_user_asm_word(x,__pu_addr,err) \ __asm__ __volatile__( \ - "1: strt %1,[%2],#0\n" \ + "1: strt %1,[%2]\n" \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -365,7 +365,7 @@ do { \ #define __put_user_asm_dword(x,__pu_addr,err) \ __asm__ __volatile__( \ "1: strt " __reg_oper1 ", [%1], #4\n" \ - "2: strt " __reg_oper0 ", [%1], #0\n" \ + "2: strt " __reg_oper0 ", [%1]\n" \ "3:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ -- cgit v1.2.3 From 1de765c1e940e23d83ec57035769e8af003f8796 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 10:14:24 +0100 Subject: [ARM] remove pc_pointer() pc_pointer() was a function to mask the PC for 26-bit ARMs, which we no longer support. Remove it. 
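A tiny standalone check of why this removal is safe: with PCMASK defined as 0 (the only case left once 26-bit CPUs were dropped), pc_pointer() is an identity operation. The definitions are copied from the lines removed below; the sample address is arbitrary.

#include <assert.h>

#define PCMASK		0
#define pc_pointer(v)	((v) & ~PCMASK)

int main(void)
{
	unsigned long pc = 0xc0008000UL;	/* arbitrary example address */

	assert(pc_pointer(pc) == pc);	/* masking with ~0 changes nothing */
	return 0;
}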
Signed-off-by: Russell King --- arch/arm/include/asm/ptrace.h | 7 +------ arch/arm/include/asm/thread_info.h | 2 +- 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index b415c0e8545..73192618f1c 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -54,7 +54,6 @@ #define PSR_C_BIT 0x20000000 #define PSR_Z_BIT 0x40000000 #define PSR_N_BIT 0x80000000 -#define PCMASK 0 /* * Groups of PSR bits @@ -139,11 +138,7 @@ static inline int valid_user_regs(struct pt_regs *regs) return 0; } -#define pc_pointer(v) \ - ((v) & ~PCMASK) - -#define instruction_pointer(regs) \ - (pc_pointer((regs)->ARM_pc)) +#define instruction_pointer(regs) (regs)->ARM_pc #ifdef CONFIG_SMP extern unsigned long profile_pc(struct pt_regs *regs); diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index e56fa48e4ae..68b9ec82a37 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void) } #define thread_saved_pc(tsk) \ - ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc))) + ((unsigned long)(task_thread_info(tsk)->cpu_context.pc)) #define thread_saved_fp(tsk) \ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp)) -- cgit v1.2.3 From 446616dbb48c7dc039649f796c3fab55c44bd0bc Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 10:56:27 +0100 Subject: [ARM] sparse: quieten arch/arm/kernel/irq.c Signed-off-by: Russell King --- arch/arm/include/asm/irq.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index d6786090d02..a0009aa5d15 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -22,6 +22,10 @@ #ifndef __ASSEMBLY__ struct irqaction; extern void migrate_irqs(void); + +extern void asm_do_IRQ(unsigned int, struct pt_regs *); +void init_IRQ(void); + #endif #endif -- cgit v1.2.3 From fced80c735941fa518ac67c0b61bbe153fb8c050 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 12:10:45 +0100 Subject: [ARM] Convert asm/io.h to linux/io.h Signed-off-by: Russell King --- arch/arm/include/asm/mc146818rtc.h | 2 +- arch/arm/include/asm/vga.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h index e1ca48a9e97..6b884d2b0b6 100644 --- a/arch/arm/include/asm/mc146818rtc.h +++ b/arch/arm/include/asm/mc146818rtc.h @@ -4,8 +4,8 @@ #ifndef _ASM_MC146818RTC_H #define _ASM_MC146818RTC_H +#include #include -#include #ifndef RTC_PORT #define RTC_PORT(x) (0x70 + (x)) diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h index 6a3cd2a2f67..250a4dd0063 100644 --- a/arch/arm/include/asm/vga.h +++ b/arch/arm/include/asm/vga.h @@ -1,8 +1,8 @@ #ifndef ASMARM_VGA_H #define ASMARM_VGA_H +#include #include -#include #define VGA_MAP_MEM(x,s) (PCIMEM_BASE + (x)) -- cgit v1.2.3 From 73b610affe0b24ecb808ef68a1b0a436a4cf7bd5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 13:35:27 +0100 Subject: [ARM] pxa: remove references to pxa_gpio_mode() in comments Signed-off-by: Russell King --- arch/arm/include/asm/mach/udc_pxa2xx.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h 
b/arch/arm/include/asm/mach/udc_pxa2xx.h index 270902c353f..f3eabf1ecec 100644 --- a/arch/arm/include/asm/mach/udc_pxa2xx.h +++ b/arch/arm/include/asm/mach/udc_pxa2xx.h @@ -18,8 +18,7 @@ struct pxa2xx_udc_mach_info { /* Boards following the design guidelines in the developer's manual, * with on-chip GPIOs not Lubbock's weird hardware, can have a sane * VBUS IRQ and omit the methods above. Store the GPIO number - * here; for GPIO 0, also mask in one of the pxa_gpio_mode() bits. - * Note that sometimes the signals go through inverters... + * here. Note that sometimes the signals go through inverters... */ bool gpio_vbus_inverted; u16 gpio_vbus; /* high == vbus present */ -- cgit v1.2.3 From c0e9587841a0fd79bbf8296034faefb9afe72fb4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 15:35:28 +0100 Subject: [ARM] Introduce new bitmask based cache type macros Rather than trying to (inaccurately) decode the cache type from the registers each time we need to decide what type of cache we have, use a bitmask initialized early during boot. Since the setup is a one-off initialization, we can be a little more clever and take account of the CPU architecture as well. Note that we continue to achieve the compactness on optimised kernels by forcing tests to always-false or always-true as appropriate, thereby allowing the compiler to do build-time code elimination. Signed-off-by: Russell King --- arch/arm/include/asm/cachetype.h | 118 ++++++++++++--------------------------- 1 file changed, 37 insertions(+), 81 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h index b52386bfd50..d3a4c2cb9f2 100644 --- a/arch/arm/include/asm/cachetype.h +++ b/arch/arm/include/asm/cachetype.h @@ -1,96 +1,52 @@ #ifndef __ASM_ARM_CACHETYPE_H #define __ASM_ARM_CACHETYPE_H -#include +#define CACHEID_VIVT (1 << 0) +#define CACHEID_VIPT_NONALIASING (1 << 1) +#define CACHEID_VIPT_ALIASING (1 << 2) +#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING) +#define CACHEID_ASID_TAGGED (1 << 3) -#define __cacheid_present(val) (val != read_cpuid_id()) -#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29)) +extern unsigned int cacheid; -#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25)) -#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25)) -#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25)) -#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23)) +#define cache_is_vivt() cacheid_is(CACHEID_VIVT) +#define cache_is_vipt() cacheid_is(CACHEID_VIPT) +#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING) +#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING) +#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED) -#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val)) -#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val)) -#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val)) -#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val)) -#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? 
((val & (3 << 14)) == (1 << 14)) : 0) - -#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) /* - * VIVT caches only + * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture + * Mask out support which will never be present on newer CPUs. + * - v6+ is never VIVT + * - v7+ VIPT never aliases */ -#define cache_is_vivt() 1 -#define cache_is_vipt() 0 -#define cache_is_vipt_nonaliasing() 0 -#define cache_is_vipt_aliasing() 0 -#define icache_is_vivt_asid_tagged() 0 +#if __LINUX_ARM_ARCH__ >= 7 +#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED) +#elif __LINUX_ARM_ARCH__ >= 6 +#define __CACHEID_ARCH_MIN (~CACHEID_VIVT) +#else +#define __CACHEID_ARCH_MIN (~0) +#endif -#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) /* - * VIPT caches only + * Mask out support which isn't configured */ -#define cache_is_vivt() 0 -#define cache_is_vipt() 1 -#define cache_is_vipt_nonaliasing() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_vipt_nonaliasing(__val); \ - }) - -#define cache_is_vipt_aliasing() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_vipt_aliasing(__val); \ - }) - -#define icache_is_vivt_asid_tagged() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_vivt_asid_tagged_instr(__val); \ - }) - +#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT) +#define __CACHEID_ALWAYS (CACHEID_VIVT) +#define __CACHEID_NEVER (~CACHEID_VIVT) +#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT) +#define __CACHEID_ALWAYS (0) +#define __CACHEID_NEVER (CACHEID_VIVT) #else -/* - * VIVT or VIPT caches. Note that this is unreliable since ARM926 - * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test. - * There's no way to tell from the CacheType register what type (!) - * the cache is. - */ -#define cache_is_vivt() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - (!__cacheid_present(__val)) || __cacheid_vivt(__val); \ - }) - -#define cache_is_vipt() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_present(__val) && __cacheid_vipt(__val); \ - }) - -#define cache_is_vipt_nonaliasing() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_present(__val) && \ - __cacheid_vipt_nonaliasing(__val); \ - }) - -#define cache_is_vipt_aliasing() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_present(__val) && \ - __cacheid_vipt_aliasing(__val); \ - }) - -#define icache_is_vivt_asid_tagged() \ - ({ \ - unsigned int __val = read_cpuid_cachetype(); \ - __cacheid_present(__val) && \ - __cacheid_vivt_asid_tagged_instr(__val); \ - }) - +#define __CACHEID_ALWAYS (0) +#define __CACHEID_NEVER (0) #endif +static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask) +{ + return (__CACHEID_ALWAYS & mask) | + (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid); +} + #endif -- cgit v1.2.3 From afd1a321c49a250dab97cef6f2d3c3c9b9d0174a Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 16:30:57 +0100 Subject: [ARM] Update dma_map_sg()/dma_unmap_sg() API Update the ARM DMA scatter gather APIs for the scatterlist changes. 
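A hedged, driver-style sketch of the dma_map_sg()/dma_unmap_sg() usage pattern that the kernel-doc in the diff below describes. program_desc() stands in for whatever programs the device's DMA descriptors; it, example_start_tx() and the error handling are illustrative only, not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* hypothetical hardware hook, stubbed for the sketch */
static void program_desc(dma_addr_t addr, unsigned int len)
{
}

static int example_start_tx(struct device *dev, struct scatterlist *sglist,
			    int nents)
{
	struct scatterlist *sg;
	int i, count;

	/*
	 * The mapping may use fewer DMA address/length pairs than nents:
	 * iterate over the returned count, but unmap with the original
	 * nents.
	 */
	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	for_each_sg(sglist, sg, count, i)
		program_desc(sg_dma_address(sg), sg_dma_length(sg));

	/* ... once the transfer has completed: */
	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
	return 0;
}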
Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 115 ++----------------------------------- 1 file changed, 5 insertions(+), 110 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 7b95d205839..eff954852c2 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -281,75 +281,6 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, dma_unmap_single(dev, handle, size, dir); } -/** - * dma_map_sg - map a set of SG buffers for streaming mode DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to map - * @dir: DMA transfer direction - * - * Map a set of buffers described by scatterlist in streaming - * mode for DMA. This is the scatter-gather version of the - * above dma_map_single interface. Here the scatter gather list - * elements are each tagged with the appropriate dma address - * and length. They are obtained via sg_dma_{address,length}(SG). - * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * - * Device ownership issues as mentioned above for dma_map_single are - * the same here. - */ -#ifndef CONFIG_DMABOUNCE -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++, sg++) { - char *virt; - - sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset; - virt = sg_virt(sg); - - if (!arch_is_coherent()) - dma_cache_maint(virt, sg->length, dir); - } - - return nents; -} -#else -extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#endif - -/** - * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to map - * @dir: DMA transfer direction - * - * Unmap a set of streaming mode DMA translations. - * Again, CPU read rules concerning calls here are the same as for - * dma_unmap_single() above. - */ -#ifndef CONFIG_DMABOUNCE -static inline void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) -{ - - /* nothing to do */ -} -#else -extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#endif - - /** * dma_sync_single_range_for_cpu * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -405,50 +336,14 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, dma_sync_single_range_for_device(dev, handle, 0, size, dir); } - -/** - * dma_sync_sg_for_cpu - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to map - * @dir: DMA transfer direction - * - * Make physical memory consistent for a set of streaming - * mode DMA translations after a transfer. - * - * The same as dma_sync_single_for_* but for a scatter-gather list, - * same rules and usage. +/* + * The scatter list versions of the above methods. 
*/ -#ifndef CONFIG_DMABOUNCE -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++, sg++) { - char *virt = sg_virt(sg); - if (!arch_is_coherent()) - dma_cache_maint(virt, sg->length, dir); - } -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir) -{ - int i; - - for (i = 0; i < nents; i++, sg++) { - char *virt = sg_virt(sg); - if (!arch_is_coherent()) - dma_cache_maint(virt, sg->length, dir); - } -} -#else +extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); +extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction); extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction); -#endif + #ifdef CONFIG_DMABOUNCE /* -- cgit v1.2.3 From 56f55f8b58a02e95b401cb50df05086cabeaeeb5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 20:59:12 +0100 Subject: [ARM] dma: provide a better dma_map_page() implementation We can translate a struct page directly to a DMA address using page_to_dma(). No need to use page_address() followed by virt_to_dma(). Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index eff954852c2..856ee1bdee5 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -227,13 +227,22 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d * can regain ownership by calling dma_unmap_page() or * dma_sync_single_for_cpu(). 
*/ +#ifndef CONFIG_DMABOUNCE static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { - return dma_map_single(dev, page_address(page) + offset, size, dir); + if (!arch_is_coherent()) + dma_cache_maint(page_address(page) + offset, size, dir); + + return page_to_dma(dev, page) + offset; } +#else +extern dma_addr_t dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir); +#endif /** * dma_unmap_single - unmap a single buffer previously mapped -- cgit v1.2.3 From 2638b4dbe768aba023a06acd8e7eba708bb76ee6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 21:38:41 +0100 Subject: [ARM] dma: Reduce to one dma_sync_sg_* implementation Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 856ee1bdee5..29404f71ab8 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -410,6 +410,17 @@ extern void dmabounce_unregister_dev(struct device *); * */ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); + +/* + * Private functions + */ +int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, + size_t, enum dma_data_direction); +int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, + size_t, enum dma_data_direction); +#else +#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) +#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) #endif /* CONFIG_DMABOUNCE */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From 8c8a0ec57ee285ff407e9a64b3a5a37eaf800ad8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 21:52:49 +0100 Subject: [ARM] dma: use new dmabounce_sync_for_xxx() for dma_sync_single_xxx() Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 150 +++++++++++++++++++------------------ 1 file changed, 76 insertions(+), 74 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 29404f71ab8..c003ad390de 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -184,6 +184,76 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t handle, size_t size); +#ifdef CONFIG_DMABOUNCE +/* + * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" + * and utilize bounce buffers as needed to work around limited DMA windows. + * + * On the SA-1111, a bug limits DMA to only certain regions of RAM. + * On the IXP425, the PCI inbound window is 64MB (256MB total RAM) + * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM) + * + * The following are helper functions used by the dmabounce subystem + * + */ + +/** + * dmabounce_register_dev + * + * @dev: valid struct device pointer + * @small_buf_size: size of buffers to use with small buffer pool + * @large_buf_size: size of buffers to use with large buffer pool (can be 0) + * + * This function should be called by low-level platform code to register + * a device as requireing DMA buffer bouncing. The function will allocate + * appropriate DMA pools for the device. 
+ * + */ +extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long); + +/** + * dmabounce_unregister_dev + * + * @dev: valid struct device pointer + * + * This function should be called by low-level platform code when device + * that was previously registered with dmabounce_register_dev is removed + * from the system. + * + */ +extern void dmabounce_unregister_dev(struct device *); + +/** + * dma_needs_bounce + * + * @dev: valid struct device pointer + * @dma_handle: dma_handle of unbounced buffer + * @size: size of region being mapped + * + * Platforms that utilize the dmabounce mechanism must implement + * this function. + * + * The dmabounce routines call this function whenever a dma-mapping + * is requested to determine whether a given buffer needs to be bounced + * or not. The function must return 0 if the buffer is OK for + * DMA access and 1 if the buffer needs to be bounced. + * + */ +extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); + +/* + * Private functions + */ +int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, + size_t, enum dma_data_direction); +int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, + size_t, enum dma_data_direction); +#else +#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) +#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) +#endif /* CONFIG_DMABOUNCE */ + + /** * dma_map_single - map a single buffer for streaming DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -308,12 +378,14 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, * must first the perform a dma_sync_for_device, and then the * device again owns the buffer. */ -#ifndef CONFIG_DMABOUNCE static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle, unsigned long offset, size_t size, enum dma_data_direction dir) { + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) + return; + if (!arch_is_coherent()) dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); } @@ -323,13 +395,12 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle, unsigned long offset, size_t size, enum dma_data_direction dir) { + if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) + return; + if (!arch_is_coherent()) dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); } -#else -extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction); -extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction); -#endif static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, @@ -354,74 +425,5 @@ extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum d extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction); -#ifdef CONFIG_DMABOUNCE -/* - * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" - * and utilize bounce buffers as needed to work around limited DMA windows. - * - * On the SA-1111, a bug limits DMA to only certain regions of RAM. 
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM) - * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM) - * - * The following are helper functions used by the dmabounce subystem - * - */ - -/** - * dmabounce_register_dev - * - * @dev: valid struct device pointer - * @small_buf_size: size of buffers to use with small buffer pool - * @large_buf_size: size of buffers to use with large buffer pool (can be 0) - * - * This function should be called by low-level platform code to register - * a device as requireing DMA buffer bouncing. The function will allocate - * appropriate DMA pools for the device. - * - */ -extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long); - -/** - * dmabounce_unregister_dev - * - * @dev: valid struct device pointer - * - * This function should be called by low-level platform code when device - * that was previously registered with dmabounce_register_dev is removed - * from the system. - * - */ -extern void dmabounce_unregister_dev(struct device *); - -/** - * dma_needs_bounce - * - * @dev: valid struct device pointer - * @dma_handle: dma_handle of unbounced buffer - * @size: size of region being mapped - * - * Platforms that utilize the dmabounce mechanism must implement - * this function. - * - * The dmabounce routines call this function whenever a dma-mapping - * is requested to determine whether a given buffer needs to be bounced - * or not. The function must return 0 if the buffer is OK for - * DMA access and 1 if the buffer needs to be bounced. - * - */ -extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); - -/* - * Private functions - */ -int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); -int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); -#else -#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) -#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) -#endif /* CONFIG_DMABOUNCE */ - #endif /* __KERNEL__ */ #endif -- cgit v1.2.3 From 125ab12acf64ff86b55d20e14db20becd917b7c4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 22:16:22 +0100 Subject: [ARM] dma: fix dmabounce dma_sync_xxx() implementations The dmabounce dma_sync_xxx() implementation have been broken for quite some time; they all copy data between the DMA buffer and the CPU visible buffer no irrespective of the change of ownership. (IOW, a DMA_FROM_DEVICE mapping copies data from the DMA buffer to the CPU buffer during a call to dma_sync_single_for_device().) Fix it by getting rid of sync_single(), moving the contents into the recently created dmabounce_sync_for_xxx() functions and adjusting appropriately. This also makes it possible to properly support the DMA range sync functions. Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index c003ad390de..1204dc958c4 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -241,6 +241,15 @@ extern void dmabounce_unregister_dev(struct device *); */ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); +/* + * The DMA API, implemented by dmabounce.c. See below for descriptions. 
+ */ +extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction); +extern dma_addr_t dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir); +extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction); + /* * Private functions */ @@ -251,7 +260,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, #else #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) #define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) -#endif /* CONFIG_DMABOUNCE */ /** @@ -268,7 +276,6 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, * can regain ownership by calling dma_unmap_single() or * dma_sync_single_for_cpu(). */ -#ifndef CONFIG_DMABOUNCE static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) @@ -278,9 +285,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, return virt_to_dma(dev, cpu_addr); } -#else -extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction); -#endif + /** * dma_map_page - map a portion of a page for streaming DMA @@ -297,7 +302,6 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d * can regain ownership by calling dma_unmap_page() or * dma_sync_single_for_cpu(). */ -#ifndef CONFIG_DMABOUNCE static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, @@ -308,11 +312,6 @@ dma_map_page(struct device *dev, struct page *page, return page_to_dma(dev, page) + offset; } -#else -extern dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir); -#endif /** * dma_unmap_single - unmap a single buffer previously mapped @@ -328,16 +327,13 @@ extern dma_addr_t dma_map_page(struct device *dev, struct page *page, * After this call, reads by the CPU to the buffer are guaranteed to see * whatever the device wrote there. */ -#ifndef CONFIG_DMABOUNCE static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { /* nothing to do */ } -#else -extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction); -#endif +#endif /* CONFIG_DMABOUNCE */ /** * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() -- cgit v1.2.3 From 3216a97bb0d5166ec5795aa3db1c3a02415ac060 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 25 Sep 2008 22:23:31 +0100 Subject: [ARM] dma: coding style cleanups Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 104 +++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 56 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 1204dc958c4..1532b7a6079 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) * Dummy noncoherent implementation. We don't provide a dma_cache_sync * function so drivers using this API are highlighted with build warnings. 
*/ -static inline void * -dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp) { return NULL; } -static inline void -dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle) +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t handle) { } @@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, * return the CPU-viewed address, and sets @handle to be the * device-viewed address. */ -extern void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp); +extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); /** * dma_free_coherent - free memory allocated by dma_alloc_coherent @@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf * References to memory and mappings associated with cpu_addr/handle * during and after this call executing are illegal. */ -extern void -dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle); +extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); /** * dma_mmap_coherent - map a coherent DMA allocation into user space @@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, * into user space. The coherent DMA buffer must not be freed by the * driver until the user space mapping has been released. */ -int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, size_t size); +int dma_mmap_coherent(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t); /** @@ -174,14 +170,14 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, * return the CPU-viewed address, and sets @handle to be the * device-viewed address. */ -extern void * -dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp); +extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, + gfp_t); #define dma_free_writecombine(dev,size,cpu_addr,handle) \ dma_free_coherent(dev,size,cpu_addr,handle) -int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, size_t size); +int dma_mmap_writecombine(struct device *, struct vm_area_struct *, + void *, dma_addr_t, size_t); #ifdef CONFIG_DMABOUNCE @@ -209,7 +205,8 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, * appropriate DMA pools for the device. * */ -extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long); +extern int dmabounce_register_dev(struct device *, unsigned long, + unsigned long); /** * dmabounce_unregister_dev @@ -244,19 +241,20 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); /* * The DMA API, implemented by dmabounce.c. See below for descriptions. 
*/ -extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction); -extern dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir); -extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction); +extern dma_addr_t dma_map_single(struct device *, void *, size_t, + enum dma_data_direction); +extern dma_addr_t dma_map_page(struct device *, struct page *, + unsigned long, size_t, enum dma_data_direction); +extern void dma_unmap_single(struct device *, dma_addr_t, size_t, + enum dma_data_direction); /* * Private functions */ int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); + size_t, enum dma_data_direction); int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, - size_t, enum dma_data_direction); + size_t, enum dma_data_direction); #else #define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) #define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) @@ -276,9 +274,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, * can regain ownership by calling dma_unmap_single() or * dma_sync_single_for_cpu(). */ -static inline dma_addr_t -dma_map_single(struct device *dev, void *cpu_addr, size_t size, - enum dma_data_direction dir) +static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, + size_t size, enum dma_data_direction dir) { if (!arch_is_coherent()) dma_cache_maint(cpu_addr, size, dir); @@ -286,7 +283,6 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, return virt_to_dma(dev, cpu_addr); } - /** * dma_map_page - map a portion of a page for streaming DMA * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices @@ -302,10 +298,8 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, * can regain ownership by calling dma_unmap_page() or * dma_sync_single_for_cpu(). */ -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir) +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir) { if (!arch_is_coherent()) dma_cache_maint(page_address(page) + offset, size, dir); @@ -327,9 +321,8 @@ dma_map_page(struct device *dev, struct page *page, * After this call, reads by the CPU to the buffer are guaranteed to see * whatever the device wrote there. */ -static inline void -dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir) +static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) { /* nothing to do */ } @@ -349,9 +342,8 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, * After this call, reads by the CPU to the buffer are guaranteed to see * whatever the device wrote there. */ -static inline void -dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir) +static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) { dma_unmap_single(dev, handle, size, dir); } @@ -374,10 +366,9 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, * must first the perform a dma_sync_for_device, and then the * device again owns the buffer. 
*/ -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t handle, unsigned long offset, size_t size, + enum dma_data_direction dir) { if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) return; @@ -386,10 +377,9 @@ dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle, dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); } -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle, - unsigned long offset, size_t size, - enum dma_data_direction dir) +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t handle, unsigned long offset, size_t size, + enum dma_data_direction dir) { if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; @@ -398,16 +388,14 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle, dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); } -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir) +static inline void dma_sync_single_for_cpu(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) { dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); } -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir) +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) { dma_sync_single_range_for_device(dev, handle, 0, size, dir); } @@ -415,10 +403,14 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, /* * The scatter list versions of the above methods. */ -extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction); -extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction); +extern int dma_map_sg(struct device *, struct scatterlist *, int, + enum dma_data_direction); +extern void dma_unmap_sg(struct device *, struct scatterlist *, int, + enum dma_data_direction); +extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, + enum dma_data_direction); +extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, + enum dma_data_direction); #endif /* __KERNEL__ */ -- cgit v1.2.3 From 0e18b5d7c6339311f1e32e7b186ae3556c5b6d33 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 29 Sep 2008 13:48:17 +0100 Subject: [ARM] dma: add validation of DMA params Validate the direction argument like x86 does. In addition, validate the dma_unmap_* parameters against those passed to dma_map_* when using the DMA bounce code. 
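(Purely illustrative: a minimal driver-side sketch of the streaming-DMA protocol these new checks enforce. The function, device and buffer below are invented for this example; the DMA calls themselves are the ARM API declared above.)

#include <linux/dma-mapping.h>

/*
 * Hypothetical transmit path: map the buffer, let the device DMA
 * from it, then unmap with the same size and direction.  With this
 * patch, passing DMA_NONE trips the new BUG_ON(), and with
 * CONFIG_DMABOUNCE the unmap parameters are additionally checked
 * against those given to dma_map_single().
 */
static void example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the hardware with 'handle' and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}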
Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 1532b7a6079..2544a087c21 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -277,6 +277,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) { + BUG_ON(!valid_dma_direction(dir)); + if (!arch_is_coherent()) dma_cache_maint(cpu_addr, size, dir); @@ -301,6 +303,8 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { + BUG_ON(!valid_dma_direction(dir)); + if (!arch_is_coherent()) dma_cache_maint(page_address(page) + offset, size, dir); @@ -370,6 +374,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle, unsigned long offset, size_t size, enum dma_data_direction dir) { + BUG_ON(!valid_dma_direction(dir)); + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) return; @@ -381,6 +387,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle, unsigned long offset, size_t size, enum dma_data_direction dir) { + BUG_ON(!valid_dma_direction(dir)); + if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; -- cgit v1.2.3 From 309dbbabee7b19e003e1ba4b98f43d28f390a84e Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 29 Sep 2008 19:50:59 +0100 Subject: [ARM] dma: don't touch cache on dma_*_for_cpu() As per the dma_unmap_* calls, we don't touch the cache when a DMA buffer transitions from device to CPU ownership. Presently, no problems have been identified with speculative cache prefetching, which is in itself a new feature in later architectures. We may have to revisit the DMA API later for these architectures anyway. Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 2544a087c21..ad62020763f 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -376,11 +376,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, { BUG_ON(!valid_dma_direction(dir)); - if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) - return; - - if (!arch_is_coherent()) - dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); + dmabounce_sync_for_cpu(dev, handle, offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, -- cgit v1.2.3 From 7807c6098a716567fe408775c1c1999467088305 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Sep 2008 11:30:24 +0100 Subject: [ARM] dma: fix some comments in dma-mapping.h ... to prevent people being misled.
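(Again illustrative only: a hedged sketch, with invented names, of the buffer-ownership hand-over that the dma_*_for_cpu() change above is about. dma_sync_single_for_cpu() gives the buffer to the CPU; dma_sync_single_for_device() gives it back.)

#include <linux/dma-mapping.h>

/* hypothetical: let the CPU inspect a mapped RX buffer between transfers */
static void example_peek_rx(struct device *dev, dma_addr_t handle,
			    void *cpu_addr, size_t len)
{
	/*
	 * device to CPU ownership; after the change above this does no
	 * cache maintenance (only the dmabounce copy, where configured)
	 */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... the CPU may now safely read cpu_addr[0..len) ... */

	/* CPU back to device ownership before restarting DMA */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}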
Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index ad62020763f..1cb8602dd9d 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -297,8 +297,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, * or written back. * * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_page() or - * dma_sync_single_for_cpu(). + * can regain ownership by calling dma_unmap_page(). */ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) @@ -315,8 +314,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, * dma_unmap_single - unmap a single buffer previously mapped * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @handle: DMA address of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction + * @size: size of buffer (same as passed to dma_map_single) + * @dir: DMA transfer direction (same as passed to dma_map_single) * * Unmap a single streaming mode DMA translation. The handle and size * must match what was provided in the previous dma_map_single() call. @@ -336,11 +335,11 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices * @handle: DMA address of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction + * @size: size of buffer (same as passed to dma_map_page) + * @dir: DMA transfer direction (same as passed to dma_map_page) * - * Unmap a single streaming mode DMA translation. The handle and size - * must match what was provided in the previous dma_map_single() call. + * Unmap a page streaming mode DMA translation. The handle and size + * must match what was provided in the previous dma_map_page() call. * All other usages are undefined. * * After this call, reads by the CPU to the buffer are guaranteed to see -- cgit v1.2.3 From dfcc64497cbbf942cdd5af4b7eb17542b62aa759 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 30 Sep 2008 16:05:09 +0100 Subject: [ARM] 5271/1: get rid of pages_to_mb() There is no use of this in the whole tree. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 8e21ef15bd7..ec630109a8c 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -319,11 +319,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) -/* - * Permanent address of a page. We never have highmem, so this is trivial. - */ -#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) - /* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
-- cgit v1.2.3 From 6c5da7aced798c7781f054a76c769b85f0173561 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Sep 2008 19:31:44 +0100 Subject: [ARM] mm: move vmalloc= parsing to arch/arm/mm/mmu.c There's no point scattering this around the tree; the parsing of the parameter might as well live beside the code which uses it. That also means we can make vmalloc_reserve a static variable. Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 7e8d22fef29..7834adbe177 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -139,14 +139,6 @@ #define arch_adjust_zones(node,size,holes) do { } while (0) #endif -/* - * Amount of memory reserved for the vmalloc() area, and minimum - * address for vmalloc mappings. - */ -extern unsigned long vmalloc_reserve; - -#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) - /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. -- cgit v1.2.3 From 9cff96e5bfc8e366166bfb07610604c7604ac48c Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 18:53:37 +0100 Subject: [ARM] Re-jig Linux PTE bits to allow room for 4 memory type bits Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 8e21ef15bd7..5c75e02b3c7 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -166,10 +166,10 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define L_PTE_YOUNG (1 << 1) #define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */ #define L_PTE_CACHEABLE (1 << 3) /* matches PTE */ -#define L_PTE_USER (1 << 4) -#define L_PTE_WRITE (1 << 5) -#define L_PTE_EXEC (1 << 6) -#define L_PTE_DIRTY (1 << 7) +#define L_PTE_DIRTY (1 << 6) +#define L_PTE_WRITE (1 << 7) +#define L_PTE_USER (1 << 8) +#define L_PTE_EXEC (1 << 9) #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ #ifndef __ASSEMBLY__ -- cgit v1.2.3 From bb30f36f9b71c31dc8fe3483bba4c9884fc86080 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 20:04:59 +0100 Subject: [ARM] Introduce new PTE memory type bits Provide L_PTE_MT_xxx definitions to describe the memory types that we use in Linux/ARM. These definitions are carefully picked such that: 1. their LSBs match what is required for pre-ARMv6 CPUs. 2. they all have a unique encoding, including after modification by build_mem_type_table() (the result being that some have more than one combination.)
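(A hedged aside on the mask-and-set idiom this 4-bit type field is designed for. pgprot_set_mt() is invented here for illustration; the real pgprot_noncached()/pgprot_writecombine() helpers reworked later in this series use exactly this pattern with the L_PTE_MT_* values defined below.)

/* hypothetical helper: replace the memory type of a pgprot value */
static inline pgprot_t pgprot_set_mt(pgprot_t prot, unsigned long mt)
{
	return __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | mt);
}

/*
 * e.g. pgprot_set_mt(PAGE_KERNEL, L_PTE_MT_WRITETHROUGH) would give a
 * kernel mapping write-through caching while leaving the permission
 * bits untouched.
 */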
Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 33 ++++++++++++++++++++++++++++----- 1 file changed, 28 insertions(+), 5 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 5c75e02b3c7..8df2e254a3e 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -164,14 +164,35 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define L_PTE_PRESENT (1 << 0) #define L_PTE_FILE (1 << 1) /* only when !PRESENT */ #define L_PTE_YOUNG (1 << 1) -#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */ -#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */ +#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */ +#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */ #define L_PTE_DIRTY (1 << 6) #define L_PTE_WRITE (1 << 7) #define L_PTE_USER (1 << 8) #define L_PTE_EXEC (1 << 9) #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ +/* + * These are the memory types, defined to be compatible with + * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + * (note: build_mem_type_table modifies these bits + * to work with our existing proc-*.S setup.) + */ +#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ +#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ +#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */ +#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ +#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ +#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 (pre-v6) */ +#define L_PTE_MT_DEV_SHARED2 (0x05 << 2) /* 0101 (v6) */ +#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ +#define L_PTE_MT_DEV_IXP2000 (0x0d << 2) /* 1101 */ +#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 (pre-v6, !xsc3) */ +#define L_PTE_MT_DEV_WC2 (0x08 << 2) /* 1000 (xsc3, v6) */ +#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ +#define L_PTE_MT_MASK (0x0f << 2) + #ifndef __ASSEMBLY__ /* @@ -180,7 +201,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); * as well as any architecture dependent bits like global/ASID and SMP * shared mapping bits. */ -#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE +#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG #define _L_PTE_READ L_PTE_USER | L_PTE_EXEC extern pgprot_t pgprot_user; @@ -286,8 +307,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * Mark the prot value as uncacheable and unbufferable. 
*/ -#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE)) -#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE) +#define pgprot_noncached(prot) \ + __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED) +#define pgprot_writecombine(prot) \ + __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd)) -- cgit v1.2.3 From 9e8b5199a753a2583a8ef8360e6428304a242283 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 20:47:54 +0100 Subject: [ARM] Convert Xscale and Xscale3 to use new memory types Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 8df2e254a3e..8f039a08b00 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -188,8 +188,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define L_PTE_MT_DEV_SHARED2 (0x05 << 2) /* 0101 (v6) */ #define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ #define L_PTE_MT_DEV_IXP2000 (0x0d << 2) /* 1101 */ -#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 (pre-v6, !xsc3) */ -#define L_PTE_MT_DEV_WC2 (0x08 << 2) /* 1000 (xsc3, v6) */ +#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 (pre-v6) */ +#define L_PTE_MT_DEV_WC2 (0x08 << 2) /* 1000 (v6) */ #define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ #define L_PTE_MT_MASK (0x0f << 2) -- cgit v1.2.3 From 639b0ae7f5bcd645862a9c3ea2d4321475c71d7a Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Sep 2008 21:07:45 +0100 Subject: [ARM] Convert ARMv6 and ARMv7 to use new memory types Signed-off-by: Russell King --- arch/arm/include/asm/pgtable.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 8f039a08b00..dfeff814a94 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -175,8 +175,6 @@ extern void __pgd_error(const char *file, int line, unsigned long val); /* * These are the memory types, defined to be compatible with * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB - * (note: build_mem_type_table modifies these bits - * to work with our existing proc-*.S setup.) 
*/ #define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ #define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ @@ -184,12 +182,10 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ #define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ #define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ -#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 (pre-v6) */ -#define L_PTE_MT_DEV_SHARED2 (0x05 << 2) /* 0101 (v6) */ +#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ #define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ #define L_PTE_MT_DEV_IXP2000 (0x0d << 2) /* 1101 */ -#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 (pre-v6) */ -#define L_PTE_MT_DEV_WC2 (0x08 << 2) /* 1000 (v6) */ +#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ #define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ #define L_PTE_MT_MASK (0x0f << 2) -- cgit v1.2.3 From db5b7169474882fabbd811a4cf5c1bae3157e677 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 7 Sep 2008 12:42:51 +0100 Subject: [ARM] Remove MT_DEVICE_IXP2000 and associated definitions As of the previous commit, MT_DEVICE_IXP2000 encodes to the same PTE bit encoding as MT_DEVICE, so it's now redundant. Convert MT_DEVICE_IXP2000 to use MT_DEVICE instead, and remove its aliases. Signed-off-by: Russell King --- arch/arm/include/asm/io.h | 5 ++--- arch/arm/include/asm/mach/map.h | 15 +++++++-------- arch/arm/include/asm/pgtable.h | 1 - 3 files changed, 9 insertions(+), 12 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 71934856fc2..a8094451be5 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -60,10 +60,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); #define MT_DEVICE 0 #define MT_DEVICE_NONSHARED 1 #define MT_DEVICE_CACHED 2 -#define MT_DEVICE_IXP2000 3 -#define MT_DEVICE_WC 4 +#define MT_DEVICE_WC 3 /* - * types 5 onwards can be found in asm/mach/map.h and are undefined + * types 4 onwards can be found in asm/mach/map.h and are undefined * for ioremap */ diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 9eb936e49cc..72586cd4372 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -18,16 +18,15 @@ struct map_desc { unsigned int type; }; -/* types 0-4 are defined in asm/io.h */ -#define MT_CACHECLEAN 5 -#define MT_MINICLEAN 6 -#define MT_LOW_VECTORS 7 -#define MT_HIGH_VECTORS 8 -#define MT_MEMORY 9 -#define MT_ROM 10 +/* types 0-3 are defined in asm/io.h */ +#define MT_CACHECLEAN 4 +#define MT_MINICLEAN 5 +#define MT_LOW_VECTORS 6 +#define MT_HIGH_VECTORS 7 +#define MT_MEMORY 8 +#define MT_ROM 9 #define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED -#define MT_IXP2000_DEVICE MT_DEVICE_IXP2000 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index dfeff814a94..e5054b026c2 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -184,7 +184,6 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ #define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ #define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ -#define L_PTE_MT_DEV_IXP2000 (0x0d << 2) /* 1101 */ #define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ #define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ #define L_PTE_MT_MASK (0x0f << 2) -- cgit v1.2.3 From 
9b727abdff93b0039fba94e96216fc280af4cf01 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 7 Sep 2008 12:45:01 +0100 Subject: [ARM] Remove MT_NONSHARED_DEVICE alias Use MT_DEVICE_NONSHARED instead. Signed-off-by: Russell King --- arch/arm/include/asm/mach/map.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 72586cd4372..cb1139ac194 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -26,8 +26,6 @@ struct map_desc { #define MT_MEMORY 8 #define MT_ROM 9 -#define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED - #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); #else -- cgit v1.2.3 From 5ec9407dd1196daaf12b427b351e2cd62d2a16a7 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 7 Sep 2008 19:15:31 +0100 Subject: [ARM] Don't include asm/elf.h in asm code asm code really wants asm/hwcap.h, so include that instead. Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 4ca75162748..7ea302c14a5 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -3,7 +3,6 @@ #include -#ifndef __ASSEMBLY__ /* * ELF register definitions.. */ @@ -17,7 +16,6 @@ typedef unsigned long elf_freg_t[3]; typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_fp elf_fpregset_t; -#endif #define EM_ARM 40 #define EF_ARM_APCS26 0x08 @@ -41,7 +39,6 @@ typedef struct user_fp elf_fpregset_t; #endif #define ELF_ARCH EM_ARM -#ifndef __ASSEMBLY__ /* * This yields a string that ld.so will use to load implementation * specific libraries for optimization. This is more specific in @@ -59,7 +56,6 @@ typedef struct user_fp elf_fpregset_t; #define ELF_PLATFORM (elf_platform) extern char elf_platform[]; -#endif /* * This is used to ensure we don't load something for the wrong architecture. -- cgit v1.2.3 From 8ec53663d2698076468b3e1edc4e1b418bd54de3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 7 Sep 2008 17:16:54 +0100 Subject: [ARM] Improve non-executable support Add support for detecting non-executable stack binaries, and adjust permissions to prevent execution from data and stack areas. Also, ensure that READ_IMPLIES_EXEC is enabled for older CPUs where that is true, and for any executable-stack binary. 
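(The header diff below only declares the new hooks; the bodies live in arch/arm/kernel/elf.c, outside this patch fragment. As a sketch, an assumption rather than a quote of that file, the decision the commit message describes could look like this:)

#include <linux/elf.h>
#include <linux/binfmts.h>	/* EXSTACK_* */
#include <asm/system.h>		/* cpu_architecture() */

/* sketch: when must a binary run with READ_IMPLIES_EXEC set? */
int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
{
	/* a binary asking for an executable stack gets exec-everything */
	if (executable_stack != EXSTACK_DISABLE_X)
		return 1;
	/* pre-v6 CPUs cannot separate read and execute permission */
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return 1;
	return 0;
}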
Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 68 +++++++++++++++++++++--------------------- arch/arm/include/asm/page.h | 5 ++-- arch/arm/include/asm/pgtable.h | 47 ++++++++++++++++------------- 3 files changed, 64 insertions(+), 56 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 7ea302c14a5..5be016980c1 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -18,9 +18,32 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_fp elf_fpregset_t; #define EM_ARM 40 -#define EF_ARM_APCS26 0x08 -#define EF_ARM_SOFT_FLOAT 0x200 -#define EF_ARM_EABI_MASK 0xFF000000 + +#define EF_ARM_EABI_MASK 0xff000000 +#define EF_ARM_EABI_UNKNOWN 0x00000000 +#define EF_ARM_EABI_VER1 0x01000000 +#define EF_ARM_EABI_VER2 0x02000000 +#define EF_ARM_EABI_VER3 0x03000000 +#define EF_ARM_EABI_VER4 0x04000000 +#define EF_ARM_EABI_VER5 0x05000000 + +#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */ +#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */ +#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */ +#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */ +#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */ +#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */ +#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */ +#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */ +#define EF_ARM_PIC 0x00000020 /* ABI 0 */ +#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */ +#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */ +#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */ +#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */ +#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */ +#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */ +#define EF_ARM_HASENTRY 0x00000002 /* All */ +#define EF_ARM_RELEXEC 0x00000001 /* All */ #define R_ARM_NONE 0 #define R_ARM_PC24 1 @@ -57,23 +80,16 @@ typedef struct user_fp elf_fpregset_t; extern char elf_platform[]; -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x)) +struct elf32_hdr; /* - * 32-bit code is always OK. Some cpus can do 26-bit, some can't. + * This is used to ensure we don't load something for the wrong architecture. */ -#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x)) - -#define ELF_THUMB_OK(x) \ - ((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \ - ((x)->e_entry & 3) == 0) +extern int elf_check_arch(const struct elf32_hdr *); +#define elf_check_arch elf_check_arch -#define ELF_26BIT_OK(x) \ - ((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \ - ((x)->e_flags & EF_ARM_APCS26) == 0) +extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); +#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 @@ -90,23 +106,7 @@ extern char elf_platform[]; have no such handler. */ #define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 -/* - * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0 - * and CP1, we only enable access to the iWMMXt coprocessor if the - * binary is EABI or softfloat (and thus, guaranteed not to use - * FPA instructions.) 
- */ -#define SET_PERSONALITY(ex, ibcs2) \ - do { \ - if ((ex).e_flags & EF_ARM_APCS26) { \ - set_personality(PER_LINUX); \ - } else { \ - set_personality(PER_LINUX_32BIT); \ - if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \ - set_thread_flag(TIF_USING_IWMMXT); \ - else \ - clear_thread_flag(TIF_USING_IWMMXT); \ - } \ - } while (0) +extern void elf_set_personality(const struct elf32_hdr *); +#define SET_PERSONALITY(ex, ibcs2) elf_set_personality(&(ex)) #endif diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index cf2e2680daa..bed1c0a0036 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -184,8 +184,9 @@ typedef struct page *pgtable_t; #endif /* !__ASSEMBLY__ */ -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) /* * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers. diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index e5054b026c2..b02be6c55ae 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -197,22 +197,29 @@ extern void __pgd_error(const char *file, int line, unsigned long val); * shared mapping bits. */ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG -#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC extern pgprot_t pgprot_user; extern pgprot_t pgprot_kernel; -#define PAGE_NONE pgprot_user -#define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ) -#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \ - L_PTE_WRITE) -#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ) -#define PAGE_KERNEL pgprot_kernel - -#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) -#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) -#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE) -#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) +#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) + +#define PAGE_NONE pgprot_user +#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) +#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) +#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) +#define PAGE_KERNEL pgprot_kernel +#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC) + +#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) +#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) +#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) +#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) +#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) #endif /* __ASSEMBLY__ */ @@ -228,19 +235,19 @@ extern pgprot_t pgprot_kernel; #define __P001 __PAGE_READONLY #define __P010 __PAGE_COPY #define __P011 __PAGE_COPY -#define __P100 __PAGE_READONLY -#define __P101 __PAGE_READONLY -#define __P110 __PAGE_COPY -#define __P111 __PAGE_COPY +#define __P100 __PAGE_READONLY_EXEC +#define __P101 
__PAGE_READONLY_EXEC +#define __P110 __PAGE_COPY_EXEC +#define __P111 __PAGE_COPY_EXEC #define __S000 __PAGE_NONE #define __S001 __PAGE_READONLY #define __S010 __PAGE_SHARED #define __S011 __PAGE_SHARED -#define __S100 __PAGE_READONLY -#define __S101 __PAGE_READONLY -#define __S110 __PAGE_SHARED -#define __S111 __PAGE_SHARED +#define __S100 __PAGE_READONLY_EXEC +#define __S101 __PAGE_READONLY_EXEC +#define __S110 __PAGE_SHARED_EXEC +#define __S111 __PAGE_SHARED_EXEC #ifndef __ASSEMBLY__ /* -- cgit v1.2.3 From d2a38ef9c1585b47462c7be5501228ac57fbd3b1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 1 Oct 2008 16:56:15 +0100 Subject: [ARM] mm: provide helpers for accessing membanks Provide helpers for getting physical addresses or pfns from the meminfo array, and use them. Move for_each_nodebank() to asm/setup.h alongside the meminfo structure definition. Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 7bbf105463f..a65413ba121 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -209,6 +209,17 @@ struct meminfo { struct membank bank[NR_BANKS]; }; +#define for_each_nodebank(iter,mi,no) \ + for (iter = 0; iter < mi->nr_banks; iter++) \ + if (mi->bank[iter].node == no) + +#define bank_pfn_start(bank) __phys_to_pfn((bank)->start) +#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) +#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT) +#define bank_phys_start(bank) (bank)->start +#define bank_phys_end(bank) ((bank)->start + (bank)->size) +#define bank_phys_size(bank) (bank)->size + /* * Early command line parameters. */ -- cgit v1.2.3 From 07f841b7c587a3cbf481509be09ba5eda05f8d31 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 1 Oct 2008 17:11:06 +0100 Subject: [ARM] mm: enable sparsemem on clps7500 and RiscPC Signed-off-by: Russell King --- arch/arm/include/asm/sparsemem.h | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h index 277158191a0..00098615c6f 100644 --- a/arch/arm/include/asm/sparsemem.h +++ b/arch/arm/include/asm/sparsemem.h @@ -3,8 +3,22 @@ #include -#define MAX_PHYSADDR_BITS 32 -#define MAX_PHYSMEM_BITS 32 -#define SECTION_SIZE_BITS NODE_MEM_SIZE_BITS +/* + * Two definitions are required for sparsemem: + * + * MAX_PHYSMEM_BITS: The number of physical address bits required + * to address the last byte of memory. + * + * SECTION_SIZE_BITS: The number of physical address bits to cover + * the maximum amount of memory in a section. + * + * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000, + * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26. + * + * Define these in your mach/memory.h. 
+ */ +#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS) +#error Sparsemem is not supported on this platform +#endif #endif -- cgit v1.2.3 From 3bca103a1e658d23737d20e1989139d9ca8973bf Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 7 Oct 2008 20:14:55 +0100 Subject: [ARM] 5295/1: make ZONE_DMA optional Most ARM machines don't need a special "DMA" memory zone, and when configured out, the kernel becomes a bit smaller: | text data bss dec hex filename |3826182 102384 111700 4040266 3da64a vmlinux |3823593 101616 111700 4036909 3d992d vmlinux.nodmazone This is because the system now has only one zone total, whose effect is to optimize away many conditionals in page allocation paths. So let's configure this zone only on machines that need split zones. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/include/asm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 7834adbe177..809ff9ab853 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -137,6 +137,8 @@ #ifndef arch_adjust_zones #define arch_adjust_zones(node,size,holes) do { } while (0) +#elif !defined(CONFIG_ZONE_DMA) +#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA" #endif /* -- cgit v1.2.3
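(Tying the last two commits together: a hypothetical mach/memory.h fragment for a platform with two banks of up to 64MB at 0x80000000 and 0x84000000, the worked example from the sparsemem comment above. The 'foo' names are invented; the point is that a custom arch_adjust_zones() now also requires CONFIG_ZONE_DMA, or the new #error fires.)

/* hypothetical arch/arm/mach-foo/include/mach/memory.h */

/* sparsemem layout: last byte of RAM below 4GB, one 64MB bank per section */
#define MAX_PHYSMEM_BITS	32
#define SECTION_SIZE_BITS	26

/*
 * Carve a DMA zone out of each node's memory; the platform's Kconfig
 * must select ZONE_DMA for this to build.
 */
#define arch_adjust_zones(node, size, holes) \
	foo_adjust_zones(node, size, holes)
extern void foo_adjust_zones(int node, unsigned long *size,
			     unsigned long *holes);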