Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/bug.h              |   2
-rw-r--r--  arch/arm/include/asm/cacheflush.h       |  90
-rw-r--r--  arch/arm/include/asm/cachetype.h        |  52
-rw-r--r--  arch/arm/include/asm/cnt32_to_63.h      |  78
-rw-r--r--  arch/arm/include/asm/cputype.h          |  64
-rw-r--r--  arch/arm/include/asm/dma-mapping.h      | 378
-rw-r--r--  arch/arm/include/asm/elf.h              |  72
-rw-r--r--  arch/arm/include/asm/futex.h            | 124
-rw-r--r--  arch/arm/include/asm/io.h               |   5
-rw-r--r--  arch/arm/include/asm/irq.h              |   4
-rw-r--r--  arch/arm/include/asm/kprobes.h          |   1
-rw-r--r--  arch/arm/include/asm/mach/map.h         |  17
-rw-r--r--  arch/arm/include/asm/mach/udc_pxa2xx.h  |   3
-rw-r--r--  arch/arm/include/asm/mc146818rtc.h      |   2
-rw-r--r--  arch/arm/include/asm/memory.h           |  40
-rw-r--r--  arch/arm/include/asm/mmu_context.h      |   1
-rw-r--r--  arch/arm/include/asm/page.h             |   5
-rw-r--r--  arch/arm/include/asm/pgtable.h          |  88
-rw-r--r--  arch/arm/include/asm/ptrace.h           |   7
-rw-r--r--  arch/arm/include/asm/setup.h            |  11
-rw-r--r--  arch/arm/include/asm/sparsemem.h        |  20
-rw-r--r--  arch/arm/include/asm/system.h           |  58
-rw-r--r--  arch/arm/include/asm/thread_info.h      |   2
-rw-r--r--  arch/arm/include/asm/uaccess.h          |  10
-rw-r--r--  arch/arm/include/asm/vga.h              |   2
25 files changed, 536 insertions(+), 600 deletions(-)
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7b62351f097..4d88425a416 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -12,7 +12,7 @@ extern void __bug(const char *file, int line) __attribute__((noreturn));
#else
/* this just causes an oops */
-#define BUG() (*(int *)0 = 0)
+#define BUG() do { *(int *)0 = 0; } while (1)
#endif
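
The do/while (1) form matters for control flow as well as for the oops itself: it tells the compiler that BUG() never falls through. A minimal sketch of the difference, with a hypothetical caller that is not part of this patch:

static int pick(int x)
{
	switch (x) {
	case 0:
		return 1;
	default:
		BUG();	/* old expression form: gcc warns "control reaches
			 * end of non-void function"; the new looping form
			 * lets the compiler see this path never returns */
	}
}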
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 9073d9c6567..de6c59f814a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -444,94 +444,4 @@ static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
dmac_inv_range(start, start + size);
}
-#define __cacheid_present(val) (val != read_cpuid(CPUID_ID))
-#define __cacheid_type_v7(val) ((val & (7 << 29)) == (4 << 29))
-
-#define __cacheid_vivt_prev7(val) ((val & (15 << 25)) != (14 << 25))
-#define __cacheid_vipt_prev7(val) ((val & (15 << 25)) == (14 << 25))
-#define __cacheid_vipt_nonaliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25))
-#define __cacheid_vipt_aliasing_prev7(val) ((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
-
-#define __cacheid_vivt(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vivt_prev7(val))
-#define __cacheid_vipt(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_prev7(val))
-#define __cacheid_vipt_nonaliasing(val) (__cacheid_type_v7(val) ? 1 : __cacheid_vipt_nonaliasing_prev7(val))
-#define __cacheid_vipt_aliasing(val) (__cacheid_type_v7(val) ? 0 : __cacheid_vipt_aliasing_prev7(val))
-#define __cacheid_vivt_asid_tagged_instr(val) (__cacheid_type_v7(val) ? ((val & (3 << 14)) == (1 << 14)) : 0)
-
-#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
-/*
- * VIVT caches only
- */
-#define cache_is_vivt() 1
-#define cache_is_vipt() 0
-#define cache_is_vipt_nonaliasing() 0
-#define cache_is_vipt_aliasing() 0
-#define icache_is_vivt_asid_tagged() 0
-
-#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
-/*
- * VIPT caches only
- */
-#define cache_is_vivt() 0
-#define cache_is_vipt() 1
-#define cache_is_vipt_nonaliasing() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_vipt_nonaliasing(__val); \
- })
-
-#define cache_is_vipt_aliasing() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_vipt_aliasing(__val); \
- })
-
-#define icache_is_vivt_asid_tagged() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_vivt_asid_tagged_instr(__val); \
- })
-
-#else
-/*
- * VIVT or VIPT caches. Note that this is unreliable since ARM926
- * and V6 CPUs satisfy the "(val & (15 << 25)) == (14 << 25)" test.
- * There's no way to tell from the CacheType register what type (!)
- * the cache is.
- */
-#define cache_is_vivt() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- (!__cacheid_present(__val)) || __cacheid_vivt(__val); \
- })
-
-#define cache_is_vipt() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_present(__val) && __cacheid_vipt(__val); \
- })
-
-#define cache_is_vipt_nonaliasing() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_present(__val) && \
- __cacheid_vipt_nonaliasing(__val); \
- })
-
-#define cache_is_vipt_aliasing() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_present(__val) && \
- __cacheid_vipt_aliasing(__val); \
- })
-
-#define icache_is_vivt_asid_tagged() \
- ({ \
- unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
- __cacheid_present(__val) && \
- __cacheid_vivt_asid_tagged_instr(__val); \
- })
-
-#endif
-
#endif
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
new file mode 100644
index 00000000000..d3a4c2cb9f2
--- /dev/null
+++ b/arch/arm/include/asm/cachetype.h
@@ -0,0 +1,52 @@
+#ifndef __ASM_ARM_CACHETYPE_H
+#define __ASM_ARM_CACHETYPE_H
+
+#define CACHEID_VIVT (1 << 0)
+#define CACHEID_VIPT_NONALIASING (1 << 1)
+#define CACHEID_VIPT_ALIASING (1 << 2)
+#define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
+#define CACHEID_ASID_TAGGED (1 << 3)
+
+extern unsigned int cacheid;
+
+#define cache_is_vivt() cacheid_is(CACHEID_VIVT)
+#define cache_is_vipt() cacheid_is(CACHEID_VIPT)
+#define cache_is_vipt_nonaliasing() cacheid_is(CACHEID_VIPT_NONALIASING)
+#define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING)
+#define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED)
+
+/*
+ * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
+ * Mask out support which will never be present on newer CPUs.
+ * - v6+ is never VIVT
+ * - v7+ VIPT never aliases
+ */
+#if __LINUX_ARM_ARCH__ >= 7
+#define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING | CACHEID_ASID_TAGGED)
+#elif __LINUX_ARM_ARCH__ >= 6
+#define __CACHEID_ARCH_MIN (~CACHEID_VIVT)
+#else
+#define __CACHEID_ARCH_MIN (~0)
+#endif
+
+/*
+ * Mask out support which isn't configured
+ */
+#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS (CACHEID_VIVT)
+#define __CACHEID_NEVER (~CACHEID_VIVT)
+#elif !defined(CONFIG_CPU_CACHE_VIVT) && defined(CONFIG_CPU_CACHE_VIPT)
+#define __CACHEID_ALWAYS (0)
+#define __CACHEID_NEVER (CACHEID_VIVT)
+#else
+#define __CACHEID_ALWAYS (0)
+#define __CACHEID_NEVER (0)
+#endif
+
+static inline unsigned int __attribute__((pure)) cacheid_is(unsigned int mask)
+{
+ return (__CACHEID_ALWAYS & mask) |
+ (~__CACHEID_NEVER & __CACHEID_ARCH_MIN & mask & cacheid);
+}
+
+#endif
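
Because __CACHEID_ALWAYS, __CACHEID_NEVER and __CACHEID_ARCH_MIN are all compile-time constants, cacheid_is() folds to a constant whenever the configuration already determines the answer, and only consults the runtime cacheid otherwise. A hedged sketch of a typical caller (hypothetical function, not from this patch):

static void maybe_flush(void)
{
	if (cache_is_vivt()) {
		/* on a VIVT-only build this test is a non-zero constant
		 * and the remaining branches are discarded outright */
	} else if (cache_is_vipt_aliasing()) {
		/* on an ARMv7-only build this is the constant 0:
		 * dead code, eliminated by the compiler */
	}
}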
diff --git a/arch/arm/include/asm/cnt32_to_63.h b/arch/arm/include/asm/cnt32_to_63.h
deleted file mode 100644
index 480c873fa74..00000000000
--- a/arch/arm/include/asm/cnt32_to_63.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * include/asm/cnt32_to_63.h -- extend a 32-bit counter to 63 bits
- *
- * Author: Nicolas Pitre
- * Created: December 3, 2006
- * Copyright: MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- */
-
-#ifndef __INCLUDE_CNT32_TO_63_H__
-#define __INCLUDE_CNT32_TO_63_H__
-
-#include <linux/compiler.h>
-#include <asm/types.h>
-#include <asm/byteorder.h>
-
-/*
- * Prototype: u64 cnt32_to_63(u32 cnt)
- * Many hardware clock counters are only 32 bits wide and therefore have
- * a relatively short period making wrap-arounds rather frequent. This
- * is a problem when implementing sched_clock() for example, where a 64-bit
- * non-wrapping monotonic value is expected to be returned.
- *
- * To overcome that limitation, let's extend a 32-bit counter to 63 bits
- * in a completely lock free fashion. Bits 0 to 31 of the clock are provided
- * by the hardware while bits 32 to 62 are stored in memory. The top bit in
- * memory is used to synchronize with the hardware clock half-period. When
- * the top bit of both counters (hardware and in memory) differ then the
- * memory is updated with a new value, incrementing it when the hardware
- * counter wraps around.
- *
- * Because a word store in memory is atomic then the incremented value will
- * always be in synch with the top bit indicating to any potential concurrent
- * reader if the value in memory is up to date or not with regards to the
- * needed increment. And any race in updating the value in memory is harmless
- * as the same value would simply be stored more than once.
- *
- * The only restriction for the algorithm to work properly is that this
- * code must be executed at least once per each half period of the 32-bit
- * counter to properly update the state bit in memory. This is usually not a
- * problem in practice, but if it is then a kernel timer could be scheduled
- * to manage for this code to be executed often enough.
- *
- * Note that the top bit (bit 63) in the returned value should be considered
- * as garbage. It is not cleared here because callers are likely to use a
- * multiplier on the returned value which can get rid of the top bit
- * implicitly by making the multiplier even, therefore saving on a runtime
- * clear-bit instruction. Otherwise caller must remember to clear the top
- * bit explicitly.
- */
-
-/* this is used only to give gcc a clue about good code generation */
-typedef union {
- struct {
-#if defined(__LITTLE_ENDIAN)
- u32 lo, hi;
-#elif defined(__BIG_ENDIAN)
- u32 hi, lo;
-#endif
- };
- u64 val;
-} cnt32_to_63_t;
-
-#define cnt32_to_63(cnt_lo) \
-({ \
- static volatile u32 __m_cnt_hi = 0; \
- cnt32_to_63_t __x; \
- __x.hi = __m_cnt_hi; \
- __x.lo = (cnt_lo); \
- if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
- __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
- __x.val; \
-})
-
-#endif
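
(The helper itself was promoted to the generic include/linux/cnt32_to_63.h rather than deleted outright.) A hedged sketch of how such a counter extension is typically consumed, assuming a hypothetical read_hw_counter() returning the raw 32-bit hardware count:

unsigned long long sched_clock(void)
{
	unsigned long long v = cnt32_to_63(read_hw_counter());

	/* an even multiplier shifts the garbage bit 63 out of the
	 * 64-bit result, as the deleted comment above recommends */
	return v * 2;
}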
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
new file mode 100644
index 00000000000..7b9d27e749b
--- /dev/null
+++ b/arch/arm/include/asm/cputype.h
@@ -0,0 +1,64 @@
+#ifndef __ASM_ARM_CPUTYPE_H
+#define __ASM_ARM_CPUTYPE_H
+
+#include <linux/stringify.h>
+
+#define CPUID_ID 0
+#define CPUID_CACHETYPE 1
+#define CPUID_TCM 2
+#define CPUID_TLBTYPE 3
+
+#ifdef CONFIG_CPU_CP15
+#define read_cpuid(reg) \
+ ({ \
+ unsigned int __val; \
+ asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
+ : "=r" (__val) \
+ : \
+ : "cc"); \
+ __val; \
+ })
+#else
+extern unsigned int processor_id;
+#define read_cpuid(reg) (processor_id)
+#endif
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than reading processor_id or read_cpuid() directly.

+ */
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return read_cpuid(CPUID_ID);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+{
+ return read_cpuid(CPUID_CACHETYPE);
+}
+
+/*
+ * Intel's XScale3 core supports some v6 features (supersections, L2)
+ * but advertises itself as v5 as it does not support the v6 ISA. For
+ * this reason, we need a way to explicitly test for this type of CPU.
+ */
+#ifndef CONFIG_CPU_XSC3
+#define cpu_is_xsc3() 0
+#else
+static inline int cpu_is_xsc3(void)
+{
+ if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+ return 1;
+
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
+#define cpu_is_xscale() 0
+#else
+#define cpu_is_xscale() 1
+#endif
+
+#endif
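
A hedged sketch of reading a field out of the main ID register through the new accessor (hypothetical helper; the implementer occupies bits 31:24 per the ARM ARM):

static int cpu_implementer_is_arm(void)
{
	return (read_cpuid_id() >> 24) == 0x41;	/* 0x41 == 'A', ARM Ltd */
}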
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7b95d205839..1cb8602dd9d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -104,15 +104,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
* Dummy noncoherent implementation. We don't provide a dma_cache_sync
* function so drivers using this API are highlighted with build warnings.
*/
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp)
{
return NULL;
}
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle)
{
}
@@ -127,8 +126,7 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
/**
* dma_free_coherent - free memory allocated by dma_alloc_coherent
@@ -143,9 +141,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle);
+extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
/**
* dma_mmap_coherent - map a coherent DMA allocation into user space
@@ -159,8 +155,8 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
/**
@@ -174,14 +170,94 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
* return the CPU-viewed address, and sets @handle to be the
* device-viewed address.
*/
-extern void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
+extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+ gfp_t);
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size);
+int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t);
+
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
+ * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
+ *
+ * The following are helper functions used by the dmabounce subsystem
+ *
+ */
+
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requireing DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ *
+ */
+extern int dmabounce_register_dev(struct device *, unsigned long,
+ unsigned long);
+
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
+extern void dmabounce_unregister_dev(struct device *);
+
+/**
+ * dma_needs_bounce
+ *
+ * @dev: valid struct device pointer
+ * @dma_handle: dma_handle of unbounced buffer
+ * @size: size of region being mapped
+ *
+ * Platforms that utilize the dmabounce mechanism must implement
+ * this function.
+ *
+ * The dmabounce routines call this function whenever a dma-mapping
+ * is requested to determine whether a given buffer needs to be bounced
+ * or not. The function must return 0 if the buffer is OK for
+ * DMA access and 1 if the buffer needs to be bounced.
+ *
+ */
+extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * The DMA API, implemented by dmabounce.c. See below for descriptions.
+ */
+extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+ enum dma_data_direction);
+extern dma_addr_t dma_map_page(struct device *, struct page *,
+ unsigned long, size_t, enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+ size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+ size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1)
/**
@@ -198,19 +274,16 @@ int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
* can regain ownership by calling dma_unmap_single() or
* dma_sync_single_for_cpu().
*/
-#ifndef CONFIG_DMABOUNCE
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction dir)
{
+ BUG_ON(!valid_dma_direction(dir));
+
if (!arch_is_coherent())
dma_cache_maint(cpu_addr, size, dir);
return virt_to_dma(dev, cpu_addr);
}
-#else
-extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
-#endif
/**
* dma_map_page - map a portion of a page for streaming DMA
@@ -224,23 +297,25 @@ extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_d
* or written back.
*
* The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page() or
- * dma_sync_single_for_cpu().
+ * can regain ownership by calling dma_unmap_page().
*/
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir)
{
- return dma_map_single(dev, page_address(page) + offset, size, dir);
+ BUG_ON(!valid_dma_direction(dir));
+
+ if (!arch_is_coherent())
+ dma_cache_maint(page_address(page) + offset, size, dir);
+
+ return page_to_dma(dev, page) + offset;
}
/**
* dma_unmap_single - unmap a single buffer previously mapped
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_single)
+ * @dir: DMA transfer direction (same as passed to dma_map_single)
*
* Unmap a single streaming mode DMA translation. The handle and size
* must match what was provided in the previous dma_map_single() call.
@@ -249,108 +324,34 @@ dma_map_page(struct device *dev, struct page *page,
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
/* nothing to do */
}
-#else
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#endif
+#endif /* CONFIG_DMABOUNCE */
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
*
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
* All other usages are undefined.
*
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
dma_unmap_single(dev, handle, size, dir);
}
/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above dma_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single are
- * the same here.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- char *virt;
-
- sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
- virt = sg_virt(sg);
-
- if (!arch_is_coherent())
- dma_cache_maint(virt, sg->length, dir);
- }
-
- return nents;
-}
-#else
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
-
- /* nothing to do */
-}
-#else
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-
-/**
* dma_sync_single_range_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @handle: DMA address of buffer
@@ -368,145 +369,52 @@ extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_da
 * must first perform a dma_sync_for_device, and then the
* device again owns the buffer.
*/
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
- if (!arch_is_coherent())
- dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+ BUG_ON(!valid_dma_direction(dir));
+
+ dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
+ BUG_ON(!valid_dma_direction(dir));
+
+ if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+ return;
+
if (!arch_is_coherent())
dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
-#else
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
-#endif
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- char *virt = sg_virt(sg);
- if (!arch_is_coherent())
- dma_cache_maint(virt, sg->length, dir);
- }
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- int i;
-
- for (i = 0; i < nents; i++, sg++) {
- char *virt = sg_virt(sg);
- if (!arch_is_coherent())
- dma_cache_maint(virt, sg->length, dir);
- }
-}
-#else
-extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-#endif
-
-#ifdef CONFIG_DMABOUNCE
/*
- * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
- * and utilize bounce buffers as needed to work around limited DMA windows.
- *
- * On the SA-1111, a bug limits DMA to only certain regions of RAM.
- * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
- * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
- *
- * The following are helper functions used by the dmabounce subystem
- *
- */
-
-/**
- * dmabounce_register_dev
- *
- * @dev: valid struct device pointer
- * @small_buf_size: size of buffers to use with small buffer pool
- * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
- *
- * This function should be called by low-level platform code to register
- * a device as requireing DMA buffer bouncing. The function will allocate
- * appropriate DMA pools for the device.
- *
- */
-extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
-
-/**
- * dmabounce_unregister_dev
- *
- * @dev: valid struct device pointer
- *
- * This function should be called by low-level platform code when device
- * that was previously registered with dmabounce_register_dev is removed
- * from the system.
- *
+ * The scatter list versions of the above methods.
*/
-extern void dmabounce_unregister_dev(struct device *);
+extern int dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#endif /* CONFIG_DMABOUNCE */
#endif /* __KERNEL__ */
#endif
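
For orientation, a hedged sketch of the streaming-DMA lifecycle these declarations describe (dev, buf and len are hypothetical; the direction must match across the calls):

dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
/* the device may now read from 'dma' */
dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
/* the CPU owns the buffer again and may touch it */
dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
/* the device owns it once more */
dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);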
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 4ca75162748..5be016980c1 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -3,7 +3,6 @@
#include <asm/hwcap.h>
-#ifndef __ASSEMBLY__
/*
* ELF register definitions..
*/
@@ -17,12 +16,34 @@ typedef unsigned long elf_freg_t[3];
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fp elf_fpregset_t;
-#endif
#define EM_ARM 40
-#define EF_ARM_APCS26 0x08
-#define EF_ARM_SOFT_FLOAT 0x200
-#define EF_ARM_EABI_MASK 0xFF000000
+
+#define EF_ARM_EABI_MASK 0xff000000
+#define EF_ARM_EABI_UNKNOWN 0x00000000
+#define EF_ARM_EABI_VER1 0x01000000
+#define EF_ARM_EABI_VER2 0x02000000
+#define EF_ARM_EABI_VER3 0x03000000
+#define EF_ARM_EABI_VER4 0x04000000
+#define EF_ARM_EABI_VER5 0x05000000
+
+#define EF_ARM_BE8 0x00800000 /* ABI 4,5 */
+#define EF_ARM_LE8 0x00400000 /* ABI 4,5 */
+#define EF_ARM_MAVERICK_FLOAT 0x00000800 /* ABI 0 */
+#define EF_ARM_VFP_FLOAT 0x00000400 /* ABI 0 */
+#define EF_ARM_SOFT_FLOAT 0x00000200 /* ABI 0 */
+#define EF_ARM_OLD_ABI 0x00000100 /* ABI 0 */
+#define EF_ARM_NEW_ABI 0x00000080 /* ABI 0 */
+#define EF_ARM_ALIGN8 0x00000040 /* ABI 0 */
+#define EF_ARM_PIC 0x00000020 /* ABI 0 */
+#define EF_ARM_MAPSYMSFIRST 0x00000010 /* ABI 2 */
+#define EF_ARM_APCS_FLOAT 0x00000010 /* ABI 0, floats in fp regs */
+#define EF_ARM_DYNSYMSUSESEGIDX 0x00000008 /* ABI 2 */
+#define EF_ARM_APCS_26 0x00000008 /* ABI 0 */
+#define EF_ARM_SYMSARESORTED 0x00000004 /* ABI 1,2 */
+#define EF_ARM_INTERWORK 0x00000004 /* ABI 0 */
+#define EF_ARM_HASENTRY 0x00000002 /* All */
+#define EF_ARM_RELEXEC 0x00000001 /* All */
#define R_ARM_NONE 0
#define R_ARM_PC24 1
@@ -41,7 +62,6 @@ typedef struct user_fp elf_fpregset_t;
#endif
#define ELF_ARCH EM_ARM
-#ifndef __ASSEMBLY__
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
@@ -59,25 +79,17 @@ typedef struct user_fp elf_fpregset_t;
#define ELF_PLATFORM (elf_platform)
extern char elf_platform[];
-#endif
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_ARM && ELF_PROC_OK(x))
+struct elf32_hdr;
/*
- * 32-bit code is always OK. Some cpus can do 26-bit, some can't.
+ * This is used to ensure we don't load something for the wrong architecture.
*/
-#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
-
-#define ELF_THUMB_OK(x) \
- ((elf_hwcap & HWCAP_THUMB && ((x)->e_entry & 1) == 1) || \
- ((x)->e_entry & 3) == 0)
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
-#define ELF_26BIT_OK(x) \
- ((elf_hwcap & HWCAP_26BIT && (x)->e_flags & EF_ARM_APCS26) || \
- ((x)->e_flags & EF_ARM_APCS26) == 0)
+extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
+#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
@@ -94,23 +106,7 @@ extern char elf_platform[];
have no such handler. */
#define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0
-/*
- * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
- * and CP1, we only enable access to the iWMMXt coprocessor if the
- * binary is EABI or softfloat (and thus, guaranteed not to use
- * FPA instructions.)
- */
-#define SET_PERSONALITY(ex, ibcs2) \
- do { \
- if ((ex).e_flags & EF_ARM_APCS26) { \
- set_personality(PER_LINUX); \
- } else { \
- set_personality(PER_LINUX_32BIT); \
- if (elf_hwcap & HWCAP_IWMMXT && (ex).e_flags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) \
- set_thread_flag(TIF_USING_IWMMXT); \
- else \
- clear_thread_flag(TIF_USING_IWMMXT); \
- } \
- } while (0)
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex, ibcs2) elf_set_personality(&(ex))
#endif
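
A hedged sketch of the kind of e_flags decoding the new out-of-line elf_check_arch() can centralise (illustrative only; the actual implementation is moved out of the header into the kernel proper):

static int eabi_version(const struct elf32_hdr *x)
{
	return (x->e_flags & EF_ARM_EABI_MASK) >> 24;	/* 0 = old ABI */
}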
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6a332a9f099..9ee743b95de 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -1,6 +1,124 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_ARM_FUTEX_H
+#define _ASM_ARM_FUTEX_H
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_SMP
#include <asm-generic/futex.h>
-#endif
+#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile__( \
+ "1: ldrt %1, [%2]\n" \
+ " " insn "\n" \
+ "2: strt %0, [%2]\n" \
+ " mov %0, #0\n" \
+ "3:\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f, 2b, 4f\n" \
+ " .previous\n" \
+ " .section .fixup,\"ax\"\n" \
+ "4: mov %0, %4\n" \
+ " b 3b\n" \
+ " .previous" \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ int val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrt %0, [%3]\n"
+ " teq %0, %1\n"
+ "2: streqt %2, [%3]\n"
+ "3:\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .long 1b, 4f, 2b, 4f\n"
+ " .previous\n"
+ " .section .fixup,\"ax\"\n"
+ "4: mov %0, %4\n"
+ " b 3b\n"
+ " .previous"
+ : "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ return val;
+}
+
+#endif /* !SMP */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARM_FUTEX_H */
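
For reference, a hedged sketch of how a caller packs encoded_op (the layout comes from the generic <linux/futex.h>; the example values are illustrative):

/* bits 31:28 op, 27:24 cmp, 23:12 oparg, 11:0 cmparg */
int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
/* futex_atomic_op_inuser() then performs
 *	oldval = *uaddr; *uaddr = oldval + 1;
 * and returns (oldval > 0) to decide whether to wake the second futex */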
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 71934856fc2..a8094451be5 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -60,10 +60,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#define MT_DEVICE 0
#define MT_DEVICE_NONSHARED 1
#define MT_DEVICE_CACHED 2
-#define MT_DEVICE_IXP2000 3
-#define MT_DEVICE_WC 4
+#define MT_DEVICE_WC 3
/*
- * types 5 onwards can be found in asm/mach/map.h and are undefined
+ * types 4 onwards can be found in asm/mach/map.h and are undefined
* for ioremap
*/
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index d6786090d02..a0009aa5d15 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -22,6 +22,10 @@
#ifndef __ASSEMBLY__
struct irqaction;
extern void migrate_irqs(void);
+
+extern void asm_do_IRQ(unsigned int, struct pt_regs *);
+void init_IRQ(void);
+
#endif
#endif
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index a5d0d99ad38..bb8a19bd582 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -61,7 +61,6 @@ struct kprobe_ctlblk {
void arch_remove_kprobe(struct kprobe *);
void kretprobe_trampoline(void);
-int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 9eb936e49cc..cb1139ac194 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,16 +18,13 @@ struct map_desc {
unsigned int type;
};
-/* types 0-4 are defined in asm/io.h */
-#define MT_CACHECLEAN 5
-#define MT_MINICLEAN 6
-#define MT_LOW_VECTORS 7
-#define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
-#define MT_ROM 10
-
-#define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED
-#define MT_IXP2000_DEVICE MT_DEVICE_IXP2000
+/* types 0-3 are defined in asm/io.h */
+#define MT_CACHECLEAN 4
+#define MT_MINICLEAN 5
+#define MT_LOW_VECTORS 6
+#define MT_HIGH_VECTORS 7
+#define MT_MEMORY 8
+#define MT_ROM 9
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h
index 270902c353f..f3eabf1ecec 100644
--- a/arch/arm/include/asm/mach/udc_pxa2xx.h
+++ b/arch/arm/include/asm/mach/udc_pxa2xx.h
@@ -18,8 +18,7 @@ struct pxa2xx_udc_mach_info {
/* Boards following the design guidelines in the developer's manual,
* with on-chip GPIOs not Lubbock's weird hardware, can have a sane
* VBUS IRQ and omit the methods above. Store the GPIO number
- * here; for GPIO 0, also mask in one of the pxa_gpio_mode() bits.
- * Note that sometimes the signals go through inverters...
+ * here. Note that sometimes the signals go through inverters...
*/
bool gpio_vbus_inverted;
u16 gpio_vbus; /* high == vbus present */
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index e1ca48a9e97..6b884d2b0b6 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -4,8 +4,8 @@
#ifndef _ASM_MC146818RTC_H
#define _ASM_MC146818RTC_H
+#include <linux/io.h>
#include <mach/irqs.h>
-#include <asm/io.h>
#ifndef RTC_PORT
#define RTC_PORT(x) (0x70 + (x))
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index bf7c737c922..809ff9ab853 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -13,30 +13,27 @@
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <mach/memory.h>
+#include <asm/sizes.h>
+
/*
* Allow for constants defined here to be used from assembly code
* by prepending the UL suffix only with actual C code compilation.
*/
-#ifndef __ASSEMBLY__
-#define UL(x) (x##UL)
-#else
-#define UL(x) (x)
-#endif
-
-#include <linux/compiler.h>
-#include <mach/memory.h>
-#include <asm/sizes.h>
+#define UL(x) _AC(x, UL)
#ifdef CONFIG_MMU
-#ifndef TASK_SIZE
/*
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define TASK_SIZE UL(0xbf000000)
-#define TASK_UNMAPPED_BASE UL(0x40000000)
-#endif
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3)
/*
* The maximum size of a 26-bit user space task.
@@ -44,13 +41,6 @@
#define TASK_SIZE_26 UL(0x04000000)
/*
- * Page offset: 3GB
- */
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET UL(0xc0000000)
-#endif
-
-/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
@@ -147,17 +137,11 @@
#ifndef arch_adjust_zones
#define arch_adjust_zones(node,size,holes) do { } while (0)
+#elif !defined(CONFIG_ZONE_DMA)
+#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
#endif
/*
- * Amount of memory reserved for the vmalloc() area, and minimum
- * address for vmalloc mappings.
- */
-extern unsigned long vmalloc_reserve;
-
-#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
-
-/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
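
The UL() cleanup leans on _AC() from <linux/const.h>, whose definition is essentially:

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)
#endif

so UL(0xc0000000) yields 0xc0000000UL when compiled as C and plain 0xc0000000 when assembled, which is exactly what the old open-coded #ifdef achieved.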
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a301e446007..0559f37c2a2 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index cf2e2680daa..bed1c0a0036 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -184,8 +184,9 @@ typedef struct page *pgtable_t;
#endif /* !__ASSEMBLY__ */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
/*
* With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 8e21ef15bd7..110295c5461 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -164,14 +164,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
-#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
-#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
-#define L_PTE_USER (1 << 4)
-#define L_PTE_WRITE (1 << 5)
-#define L_PTE_EXEC (1 << 6)
-#define L_PTE_DIRTY (1 << 7)
+#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */
+#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */
+#define L_PTE_DIRTY (1 << 6)
+#define L_PTE_WRITE (1 << 7)
+#define L_PTE_USER (1 << 8)
+#define L_PTE_EXEC (1 << 9)
#define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */
+/*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ */
+#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */
+#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */
+#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */
+#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */
+#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */
+#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */
+#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */
+#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */
+#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */
+#define L_PTE_MT_MASK (0x0f << 2)
+
#ifndef __ASSEMBLY__
/*
@@ -180,23 +196,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
* as well as any architecture dependent bits like global/ASID and SMP
* shared mapping bits.
*/
-#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
-#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC
+#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;
-#define PAGE_NONE pgprot_user
-#define PAGE_COPY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
-#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ | \
- L_PTE_WRITE)
-#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user) | _L_PTE_READ)
-#define PAGE_KERNEL pgprot_kernel
-
-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
-#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
-#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
-#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
+#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
+
+#define PAGE_NONE pgprot_user
+#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
+#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
+#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
+#define PAGE_KERNEL pgprot_kernel
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC)
+
+#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
+#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
+#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
+#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
+#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#endif /* __ASSEMBLY__ */
@@ -212,19 +235,19 @@ extern pgprot_t pgprot_kernel;
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY
-#define __P101 __PAGE_READONLY
-#define __P110 __PAGE_COPY
-#define __P111 __PAGE_COPY
+#define __P100 __PAGE_READONLY_EXEC
+#define __P101 __PAGE_READONLY_EXEC
+#define __P110 __PAGE_COPY_EXEC
+#define __P111 __PAGE_COPY_EXEC
#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY
-#define __S101 __PAGE_READONLY
-#define __S110 __PAGE_SHARED
-#define __S111 __PAGE_SHARED
+#define __S100 __PAGE_READONLY_EXEC
+#define __S101 __PAGE_READONLY_EXEC
+#define __S110 __PAGE_SHARED_EXEC
+#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
@@ -286,8 +309,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* Mark the prot value as uncacheable and unbufferable.
*/
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
-#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
+#define pgprot_noncached(prot) \
+ __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+#define pgprot_writecombine(prot) \
+ __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
@@ -320,11 +345,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
/*
- * Permanent address of a page. We never have highmem, so this is trivial.
- */
-#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))
-
-/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
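
A hedged sketch of the practical difference (hypothetical driver fragment):

pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
/* old scheme: cleared only L_PTE_CACHEABLE/L_PTE_BUFFERABLE; new
 * scheme: replaces the whole 4-bit L_PTE_MT field, so an encoding
 * such as L_PTE_MT_WRITEALLOC cannot partially survive conversion */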
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index b415c0e8545..73192618f1c 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -54,7 +54,6 @@
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
-#define PCMASK 0
/*
* Groups of PSR bits
@@ -139,11 +138,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
return 0;
}
-#define pc_pointer(v) \
- ((v) & ~PCMASK)
-
-#define instruction_pointer(regs) \
- (pc_pointer((regs)->ARM_pc))
+#define instruction_pointer(regs) (regs)->ARM_pc
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 7bbf105463f..a65413ba121 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -209,6 +209,17 @@ struct meminfo {
struct membank bank[NR_BANKS];
};
+#define for_each_nodebank(iter,mi,no) \
+ for (iter = 0; iter < mi->nr_banks; iter++) \
+ if (mi->bank[iter].node == no)
+
+#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank) (bank)->start
+#define bank_phys_end(bank) ((bank)->start + (bank)->size)
+#define bank_phys_size(bank) (bank)->size
+
/*
* Early command line parameters.
*/
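
A hedged sketch of the new bank iterators in use (hypothetical helper, modeled on the arch/arm/mm callers):

static unsigned long node_pages(struct meminfo *mi, int node)
{
	unsigned long pages = 0;
	int i;

	for_each_nodebank(i, mi, node)
		pages += bank_pfn_size(&mi->bank[i]);

	return pages;
}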
diff --git a/arch/arm/include/asm/sparsemem.h b/arch/arm/include/asm/sparsemem.h
index 277158191a0..00098615c6f 100644
--- a/arch/arm/include/asm/sparsemem.h
+++ b/arch/arm/include/asm/sparsemem.h
@@ -3,8 +3,22 @@
#include <asm/memory.h>
-#define MAX_PHYSADDR_BITS 32
-#define MAX_PHYSMEM_BITS 32
-#define SECTION_SIZE_BITS NODE_MEM_SIZE_BITS
+/*
+ * Two definitions are required for sparsemem:
+ *
+ * MAX_PHYSMEM_BITS: The number of physical address bits required
+ * to address the last byte of memory.
+ *
+ * SECTION_SIZE_BITS: The number of physical address bits to cover
+ * the maximum amount of memory in a section.
+ *
+ * Eg, if you have 2 banks of up to 64MB at 0x80000000, 0x84000000,
+ * then MAX_PHYSMEM_BITS is 32, SECTION_SIZE_BITS is 26.
+ *
+ * Define these in your mach/memory.h.
+ */
+#if !defined(SECTION_SIZE_BITS) || !defined(MAX_PHYSMEM_BITS)
+#error Sparsemem is not supported on this platform
+#endif
#endif
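
Following the example in the comment (two banks of up to 64MB at 0x80000000 and 0x84000000), the per-machine header would provide something like the following — illustrative values only, to be placed in mach/memory.h:

#define MAX_PHYSMEM_BITS	32
#define SECTION_SIZE_BITS	26	/* 64MB per section */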
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 514af792a59..7aad78420f1 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -43,11 +43,6 @@
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
-#define CPUID_ID 0
-#define CPUID_CACHETYPE 1
-#define CPUID_TCM 2
-#define CPUID_TLBTYPE 3
-
/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for some inline assembly sequences. Apparently we can't trust
@@ -61,36 +56,8 @@
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
-#include <linux/stringify.h>
#include <linux/irqflags.h>
-#ifdef CONFIG_CPU_CP15
-#define read_cpuid(reg) \
- ({ \
- unsigned int __val; \
- asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
- : "=r" (__val) \
- : \
- : "cc"); \
- __val; \
- })
-#else
-extern unsigned int processor_id;
-#define read_cpuid(reg) (processor_id)
-#endif
-
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline unsigned int read_cpuid_id(void) __attribute_const__;
-
-static inline unsigned int read_cpuid_id(void)
-{
- return read_cpuid(CPUID_ID);
-}
-
#define __exception __attribute__((section(".exception.text")))
struct thread_info;
@@ -131,31 +98,6 @@ extern void cpu_init(void);
void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);
-/*
- * Intel's XScale3 core supports some v6 features (supersections, L2)
- * but advertises itself as v5 as it does not support the v6 ISA. For
- * this reason, we need a way to explicitly test for this type of CPU.
- */
-#ifndef CONFIG_CPU_XSC3
-#define cpu_is_xsc3() 0
-#else
-static inline int cpu_is_xsc3(void)
-{
- extern unsigned int processor_id;
-
- if ((processor_id & 0xffffe000) == 0x69056000)
- return 1;
-
- return 0;
-}
-#endif
-
-#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
-#define cpu_is_xscale() 0
-#else
-#define cpu_is_xscale() 1
-#endif
-
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e56fa48e4ae..68b9ec82a37 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -98,7 +98,7 @@ static inline struct thread_info *current_thread_info(void)
}
#define thread_saved_pc(tsk) \
- ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_fp(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index d0f51ff900b..e98ec60b340 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -225,7 +225,7 @@ do { \
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
- "1: ldrbt %1,[%2],#0\n" \
+ "1: ldrbt %1,[%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -261,7 +261,7 @@ do { \
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
- "1: ldrt %1,[%2],#0\n" \
+ "1: ldrt %1,[%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -306,7 +306,7 @@ do { \
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
- "1: strbt %1,[%2],#0\n" \
+ "1: strbt %1,[%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -339,7 +339,7 @@ do { \
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
- "1: strt %1,[%2],#0\n" \
+ "1: strt %1,[%2]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -365,7 +365,7 @@ do { \
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt " __reg_oper1 ", [%1], #4\n" \
- "2: strt " __reg_oper0 ", [%1], #0\n" \
+ "2: strt " __reg_oper0 ", [%1]\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h
index 6a3cd2a2f67..250a4dd0063 100644
--- a/arch/arm/include/asm/vga.h
+++ b/arch/arm/include/asm/vga.h
@@ -1,8 +1,8 @@
#ifndef ASMARM_VGA_H
#define ASMARM_VGA_H
+#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/io.h>
#define VGA_MAP_MEM(x,s) (PCIMEM_BASE + (x))