Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-ixp4xx/io.h | 9
-rw-r--r--  include/asm-arm/io.h | 21
-rw-r--r--  include/asm-arm/uaccess.h | 8
-rw-r--r--  include/asm-parisc/irq.h | 5
-rw-r--r--  include/asm-parisc/smp.h | 7
-rw-r--r--  include/asm-parisc/spinlock.h | 19
-rw-r--r--  include/asm-parisc/tlbflush.h | 16
-rw-r--r--  include/asm-powerpc/cputable.h | 22
-rw-r--r--  include/asm-powerpc/delay.h | 40
-rw-r--r--  include/asm-powerpc/dma-mapping.h (renamed from include/asm-ppc/dma-mapping.h) | 138
-rw-r--r--  include/asm-powerpc/eeh.h | 4
-rw-r--r--  include/asm-powerpc/io.h (renamed from include/asm-ppc64/io.h) | 12
-rw-r--r--  include/asm-powerpc/mmu.h (renamed from include/asm-ppc64/mmu.h) | 14
-rw-r--r--  include/asm-powerpc/mmu_context.h (renamed from include/asm-ppc64/mmu_context.h) | 12
-rw-r--r--  include/asm-powerpc/mmzone.h (renamed from include/asm-ppc64/mmzone.h) | 0
-rw-r--r--  include/asm-powerpc/page_64.h | 4
-rw-r--r--  include/asm-powerpc/pci-bridge.h (renamed from include/asm-ppc64/pci-bridge.h) | 12
-rw-r--r--  include/asm-powerpc/pci.h (renamed from include/asm-ppc64/pci.h) | 110
-rw-r--r--  include/asm-powerpc/pgalloc.h (renamed from include/asm-ppc64/pgalloc.h) | 11
-rw-r--r--  include/asm-powerpc/pgtable-4k.h (renamed from include/asm-ppc64/pgtable-4k.h) | 0
-rw-r--r--  include/asm-powerpc/pgtable-64k.h (renamed from include/asm-ppc64/pgtable-64k.h) | 0
-rw-r--r--  include/asm-powerpc/pgtable.h (renamed from include/asm-ppc64/pgtable.h) | 20
-rw-r--r--  include/asm-powerpc/ppc-pci.h | 2
-rw-r--r--  include/asm-powerpc/spinlock.h (renamed from include/asm-ppc64/spinlock.h) | 72
-rw-r--r--  include/asm-powerpc/topology.h | 4
-rw-r--r--  include/asm-ppc/cpm2.h | 2
-rw-r--r--  include/asm-ppc/io.h | 17
-rw-r--r--  include/asm-ppc64/dma-mapping.h | 136
-rw-r--r--  include/asm-ppc64/imalloc.h | 26
-rw-r--r--  include/asm-ppc64/ptrace-common.h | 164
-rw-r--r--  include/linux/cciss_ioctl.h | 2
-rw-r--r--  include/linux/ide.h | 4
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/net/ieee80211.h | 2
34 files changed, 372 insertions(+), 545 deletions(-)
diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
index 688f7f90d93..942b622455b 100644
--- a/include/asm-arm/arch-ixp4xx/io.h
+++ b/include/asm-arm/arch-ixp4xx/io.h
@@ -59,11 +59,10 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
* fallback to the default.
*/
static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags)
{
- extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
if((addr < 0x48000000) || (addr > 0x4fffffff))
- return __ioremap(addr, size, flags, align);
+ return __ioremap(addr, size, flags);
return (void *)addr;
}
@@ -71,13 +70,11 @@ __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned
static inline void
__ixp4xx_iounmap(void __iomem *addr)
{
- extern void __iounmap(void __iomem *addr);
-
if ((u32)addr >= VMALLOC_START)
__iounmap(addr);
}
-#define __arch_ioremap(a, s, f, x) __ixp4xx_ioremap(a, s, f, x)
+#define __arch_ioremap(a, s, f) __ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a) __ixp4xx_iounmap(a)
#define writeb(v, p) __ixp4xx_writeb(v, p)
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index 2e6799632f1..ae69db4a101 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -55,6 +55,12 @@ extern void __raw_readsl(void __iomem *addr, void *data, int longlen);
#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
/*
+ * Architecture ioremap implementation.
+ */
+extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
+extern void __iounmap(void __iomem *addr);
+
+/*
* Bad read/write accesses...
*/
extern void __readwrite_bug(const char *fn);
@@ -256,18 +262,15 @@ out:
* ioremap takes a PCI memory address, as specified in
* Documentation/IO-mapping.txt.
*/
-extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
-extern void __iounmap(void __iomem *addr);
-
#ifndef __arch_ioremap
-#define ioremap(cookie,size) __ioremap(cookie,size,0,1)
-#define ioremap_nocache(cookie,size) __ioremap(cookie,size,0,1)
-#define ioremap_cached(cookie,size) __ioremap(cookie,size,L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size) __ioremap(cookie,size,0)
+#define ioremap_nocache(cookie,size) __ioremap(cookie,size,0)
+#define ioremap_cached(cookie,size) __ioremap(cookie,size,L_PTE_CACHEABLE)
#define iounmap(cookie) __iounmap(cookie)
#else
-#define ioremap(cookie,size) __arch_ioremap((cookie),(size),0,1)
-#define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0,1)
-#define ioremap_cached(cookie,size) __arch_ioremap((cookie),(size),L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size) __arch_ioremap((cookie),(size),0)
+#define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0)
+#define ioremap_cached(cookie,size) __arch_ioremap((cookie),(size),L_PTE_CACHEABLE)
#define iounmap(cookie) __arch_iounmap(cookie)
#endif
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index a2fdad0138b..064f0f5e8e2 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -100,7 +100,6 @@ static inline void set_fs (mm_segment_t fs)
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-extern int __get_user_8(void *);
extern int __get_user_bad(void);
#define __get_user_x(__r2,__p,__e,__s,__i...) \
@@ -114,7 +113,7 @@ extern int __get_user_bad(void);
#define get_user(x,p) \
({ \
const register typeof(*(p)) __user *__p asm("r0") = (p);\
- register typeof(*(p)) __r2 asm("r2"); \
+ register unsigned int __r2 asm("r2"); \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
@@ -126,12 +125,9 @@ extern int __get_user_bad(void);
case 4: \
__get_user_x(__r2, __p, __e, 4, "lr"); \
break; \
- case 8: \
- __get_user_x(__r2, __p, __e, 8, "lr"); \
- break; \
default: __e = __get_user_bad(); break; \
} \
- x = __r2; \
+ x = (typeof(*(p))) __r2; \
__e; \
})
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf2205..b0a30e2c981 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
#define _ASM_PARISC_IRQ_H
#include <linux/config.h>
+#include <linux/cpumask.h>
#include <asm/types.h>
#define NO_IRQ (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540..dbdbd2e9fdf 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
#define cpu_logical_map(cpu) (cpu)
extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
#endif /* !ASSEMBLY */
@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;
#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746..16c2ac075fc 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
return *a == 0;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+ unsigned long flags)
{
volatile unsigned int *a;
mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
- while (*a == 0);
+ while (*a == 0)
+ if (flags & PSW_SM_I) {
+ local_irq_enable();
+ cpu_relax();
+ local_irq_disable();
+ } else
+ cpu_relax();
mb();
}
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
- unsigned long flags;
- local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter++;
__raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
}
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
- unsigned long flags;
- local_irq_save(flags);
__raw_spin_lock(&rw->lock);
rw->counter--;
__raw_spin_unlock(&rw->lock);
- local_irq_restore(flags);
}
/* write_lock is less trivial. We optimistically grab the lock and check
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff..c9ec39c6fc6 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
* N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
+ * it on all SMP systems not just the N class. We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
extern spinlock_t pa_tlb_lock;
#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
-
-#endif
-
-
extern void flush_tlb_all(void);
/*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
- preempt_disable();
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
pdtlb(start);
start += PAGE_SIZE;
}
- preempt_enable();
}
purge_tlb_end();
}
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 04e2726002c..d1cfa3f515e 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -90,6 +90,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#ifdef __powerpc64__
/* Add the 64b processor unique features in the top half of the word */
@@ -97,7 +98,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
@@ -113,7 +113,6 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
#define CPU_FTR_TLBIEL ASM_CONST(0x0)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0)
#define CPU_FTR_IABR ASM_CONST(0x0)
#define CPU_FTR_MMCRA ASM_CONST(0x0)
#define CPU_FTR_CTRL ASM_CONST(0x0)
@@ -273,18 +272,21 @@ enum {
CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_NODSISRALIGN,
CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
- CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN,
CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
- CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
- CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
- CPU_FTRS_E200 = CPU_FTR_USE_TB,
- CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+ CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_NODSISRALIGN,
+ CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_NODSISRALIGN,
+ CPU_FTRS_E200 = CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN,
+ CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_NODSISRALIGN,
CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
- CPU_FTR_BIG_PHYS,
- CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
+ CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN,
+ CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN,
#ifdef __powerpc64__
CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
diff --git a/include/asm-powerpc/delay.h b/include/asm-powerpc/delay.h
index 1492aa9ab71..54fe1f4f8fd 100644
--- a/include/asm-powerpc/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -13,43 +13,7 @@
* Anton Blanchard.
*/
-extern unsigned long tb_ticks_per_usec;
-
-#ifdef CONFIG_PPC64
-/* define these here to prevent circular dependencies */
-/* these instructions control the thread priority on multi-threaded cpus */
-#define __HMT_low() asm volatile("or 1,1,1")
-#define __HMT_medium() asm volatile("or 2,2,2")
-#else
-#define __HMT_low()
-#define __HMT_medium()
-#endif
-
-#define __barrier() asm volatile("" ::: "memory")
-
-static inline unsigned long __get_tb(void)
-{
- unsigned long rval;
-
- asm volatile("mftb %0" : "=r" (rval));
- return rval;
-}
-
-static inline void __delay(unsigned long loops)
-{
- unsigned long start = __get_tb();
-
- while((__get_tb() - start) < loops)
- __HMT_low();
- __HMT_medium();
- __barrier();
-}
-
-static inline void udelay(unsigned long usecs)
-{
- unsigned long loops = tb_ticks_per_usec * usecs;
-
- __delay(loops);
-}
+extern void __delay(unsigned long loops);
+extern void udelay(unsigned long usecs);
#endif /* _ASM_POWERPC_DELAY_H */
diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 6e963511443..59a80163f75 100644
--- a/include/asm-ppc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -1,15 +1,22 @@
/*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
*/
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
@@ -24,22 +31,12 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
-#define dma_cache_inv(_start,_size) \
- invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
- clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
- flush_dcache_range(_start, (_start + _size))
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
* Cache coherent cores.
*/
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
#define __dma_alloc_coherent(gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) do { } while (0)
#define __dma_sync(addr, size, rw) do { } while (0)
@@ -47,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
#endif /* ! CONFIG_NOT_COHERENT_CACHE */
+#ifdef CONFIG_PPC64
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction);
+
+#else /* CONFIG_PPC64 */
+
#define dma_supported(dev, mask) (1)
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
@@ -144,29 +165,27 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
-
__dma_sync(bus_to_virt(dma_handle), size, direction);
}
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
-
__dma_sync(bus_to_virt(dma_handle), size, direction);
}
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
{
int i;
@@ -176,9 +195,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
{
int i;
@@ -188,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+ return (dma_addr == DMA_ERROR_CODE);
+#else
+ return 0;
+#endif
+}
+
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -198,40 +226,60 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
static inline int dma_get_cache_alignment(void)
{
+#ifdef CONFIG_PPC64
+ /* no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe */
+ return (1 << L1_CACHE_SHIFT_MAX);
+#else
/*
* Each processor family will define its own L1_CACHE_SHIFT,
* L1_CACHE_BYTES wraps to this, so this is always safe.
*/
return L1_CACHE_BYTES;
+#endif
}
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
/* just sync everything for now */
dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
+ BUG_ON(direction == DMA_NONE);
__dma_sync(vaddr, size, (int)direction);
}
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- return 0;
-}
-
-#endif /* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+ void * (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *dev, void *ptr,
+ size_t size, enum dma_data_direction direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ int (*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif /* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 89f26ab3190..f8633aafe4b 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -30,6 +30,8 @@ struct device_node;
#ifdef CONFIG_EEH
+extern int eeh_subsystem_enabled;
+
/* Values for eeh_mode bits in device_node */
#define EEH_MODE_SUPPORTED (1<<0)
#define EEH_MODE_NOCHECK (1<<1)
@@ -75,7 +77,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
-#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0)
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
/*
* Reads from a device which has been isolated by EEH will return
diff --git a/include/asm-ppc64/io.h b/include/asm-powerpc/io.h
index 77fc07c3c6b..48938d84d05 100644
--- a/include/asm-ppc64/io.h
+++ b/include/asm-powerpc/io.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_IO_H
-#define _PPC64_IO_H
+#ifndef _ASM_POWERPC_IO_H
+#define _ASM_POWERPC_IO_H
/*
* This program is free software; you can redistribute it and/or
@@ -8,7 +8,10 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
+#ifndef CONFIG_PPC64
+#include <asm-ppc/io.h>
+#else
+
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/byteorder.h>
@@ -455,4 +458,5 @@ extern int check_legacy_ioport(unsigned long base_port);
#endif /* __KERNEL__ */
-#endif /* _PPC64_IO_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_IO_H */
diff --git a/include/asm-ppc64/mmu.h b/include/asm-powerpc/mmu.h
index 1a7e0afa2dc..c1b4bbabbe9 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -1,3 +1,10 @@
+#ifndef _ASM_POWERPC_MMU_H_
+#define _ASM_POWERPC_MMU_H_
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu.h>
+#else
+
/*
* PowerPC memory management structures
*
@@ -10,10 +17,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef _PPC64_MMU_H_
-#define _PPC64_MMU_H_
-
-#include <linux/config.h>
#include <asm/asm-compat.h>
#include <asm/page.h>
@@ -392,4 +395,5 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
#endif /* __ASSEMBLY */
-#endif /* _PPC64_MMU_H_ */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-powerpc/mmu_context.h
index 4f512e9fa6b..ea6798c7d5f 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -1,7 +1,10 @@
-#ifndef __PPC64_MMU_CONTEXT_H
-#define __PPC64_MMU_CONTEXT_H
+#ifndef __ASM_POWERPC_MMU_CONTEXT_H
+#define __ASM_POWERPC_MMU_CONTEXT_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/mmu_context.h>
+#else
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
@@ -82,4 +85,5 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
local_irq_restore(flags);
}
-#endif /* __PPC64_MMU_CONTEXT_H */
+#endif /* CONFIG_PPC64 */
+#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-powerpc/mmzone.h
index 54958d6cae0..54958d6cae0 100644
--- a/include/asm-ppc64/mmzone.h
+++ b/include/asm-powerpc/mmzone.h
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index c16f106b537..1e6e7846824 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -86,7 +86,11 @@ static inline void copy_page(void *to, void *from)
extern u64 ppc64_pft_size;
/* Large pages size */
+#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
+#else
+#define HPAGE_SHIFT PAGE_SHIFT
+#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index cf04327a597..223ec7bd81d 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -1,8 +1,10 @@
-#ifdef __KERNEL__
-#ifndef _ASM_PCI_BRIDGE_H
-#define _ASM_PCI_BRIDGE_H
+#ifndef _ASM_POWERPC_PCI_BRIDGE_H
+#define _ASM_POWERPC_PCI_BRIDGE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pci-bridge.h>
+#else
-#include <linux/config.h>
#include <linux/pci.h>
#include <linux/list.h>
@@ -147,5 +149,5 @@ extern void pcibios_free_controller(struct pci_controller *phb);
#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
+#endif /* CONFIG_PPC64 */
#endif
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/pci.h b/include/asm-powerpc/pci.h
index fafdf885a3c..d5934a076bd 100644
--- a/include/asm-ppc64/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -1,5 +1,5 @@
-#ifndef __PPC64_PCI_H
-#define __PPC64_PCI_H
+#ifndef __ASM_POWERPC_PCI_H
+#define __ASM_POWERPC_PCI_H
#ifdef __KERNEL__
/*
@@ -18,6 +18,7 @@
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
+#include <asm/pci-bridge.h>
#include <asm-generic/pci-dma-compat.h>
@@ -26,11 +27,21 @@
struct pci_dev;
-#ifdef CONFIG_PPC_ISERIES
+/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
+#define IOBASE_BRIDGE_NUMBER 0
+#define IOBASE_MEMORY 1
+#define IOBASE_IO 2
+#define IOBASE_ISA_IO 3
+#define IOBASE_ISA_MEM 4
+
+/*
+ * Set this to 1 if you want the kernel to re-assign all PCI
+ * bus numbers
+ */
+extern int pci_assign_all_buses;
+#define pcibios_assign_all_busses() (pci_assign_all_buses)
+
#define pcibios_scan_all_fns(a, b) 0
-#else
-extern int pcibios_scan_all_fns(struct pci_bus *bus, int devfn);
-#endif
static inline void pcibios_set_master(struct pci_dev *dev)
{
@@ -50,6 +61,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
+#ifdef CONFIG_PPC64
#define HAVE_ARCH_PCI_MWI 1
static inline int pcibios_prep_mwi(struct pci_dev *dev)
{
@@ -64,12 +76,10 @@ static inline int pcibios_prep_mwi(struct pci_dev *dev)
return 0;
}
-extern unsigned int pcibios_assign_all_busses(void);
-
extern struct dma_mapping_ops pci_dma_ops;
/* For DAC DMA, we currently don't support it by default, but
- * we let the platform override this
+ * we let 64-bit platforms override this.
*/
static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
{
@@ -102,6 +112,35 @@ extern int pci_domain_nr(struct pci_bus *bus);
/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);
+#else /* 32-bit */
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+
+/*
+ * At present there are very few 32-bit PPC machines that can have
+ * memory above the 4GB point, and we don't support that.
+ */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* Return the index of the PCI controller for device PDEV. */
+#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
+
+/* Set the name of the bus as it appears in /proc/bus/pci */
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
@@ -110,6 +149,7 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
+#ifdef CONFIG_PPC64
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
@@ -124,22 +164,40 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
-/* The PCI address space does equal the physical memory
- * address space. The networking and block device layers use
+/* The PCI address space does not equal the physical memory address
+ * space (we have an IOMMU). The IDE and SCSI device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (0)
+
+#else /* 32-bit */
+
+/* The PCI address space does equal the physical memory
+ * address space (no IOMMU). The IDE and SCSI device layers use
+ * this boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+#endif /* CONFIG_PPC64 */
-extern void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+extern void pcibios_resource_to_bus(struct pci_dev *dev,
+ struct pci_bus_region *region,
struct resource *res);
-extern void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+extern void pcibios_bus_to_resource(struct pci_dev *dev,
+ struct resource *res,
struct pci_bus_region *region);
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
+ struct resource *res)
{
struct resource *root = NULL;
@@ -151,14 +209,12 @@ pcibios_select_root(struct pci_dev *pdev, struct resource *res)
return root;
}
-extern int
-unmap_bus_range(struct pci_bus *bus);
+extern int unmap_bus_range(struct pci_bus *bus);
-extern int
-remap_bus_range(struct pci_bus *bus);
+extern int remap_bus_range(struct pci_bus *bus);
-extern void
-pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus);
+extern void pcibios_fixup_device_resources(struct pci_dev *dev,
+ struct pci_bus *bus);
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
@@ -180,14 +236,12 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long size,
pgprot_t prot);
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#if defined(CONFIG_PPC_MULTIPLATFORM) || defined(CONFIG_PPC32)
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
u64 *start, u64 *end);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
+#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */
#endif /* __KERNEL__ */
-
-#endif /* __PPC64_PCI_H */
+#endif /* __ASM_POWERPC_PCI_H */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-powerpc/pgalloc.h
index dcf3622d194..bfc2113b363 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -1,5 +1,9 @@
-#ifndef _PPC64_PGALLOC_H
-#define _PPC64_PGALLOC_H
+#ifndef _ASM_POWERPC_PGALLOC_H
+#define _ASM_POWERPC_PGALLOC_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgalloc.h>
+#else
#include <linux/mm.h>
#include <linux/slab.h>
@@ -148,4 +152,5 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#define check_pgt_cache() do { } while (0)
-#endif /* _PPC64_PGALLOC_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index e9590c06ad9..e9590c06ad9 100644
--- a/include/asm-ppc64/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 154f1840ece..154f1840ece 100644
--- a/include/asm-ppc64/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-powerpc/pgtable.h
index a9783ba7fe9..0303f57366c 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -1,5 +1,9 @@
-#ifndef _PPC64_PGTABLE_H
-#define _PPC64_PGTABLE_H
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgtable.h>
+#else
/*
* This file contains the functions and defines necessary to modify and use
@@ -47,6 +51,13 @@ struct mm_struct;
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
+ * Define the address range of the imalloc VM area.
+ */
+#define PHBS_IO_BASE VMALLOC_END
+#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
+#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
+
+/*
* Common bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PowerPC PTE as closely as possible. Additional
* bits may be defined in pgtable-*.h
@@ -69,7 +80,7 @@ struct mm_struct;
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-/* __pgprot defined in asm-ppc64/page.h */
+/* __pgprot defined in asm-powerpc/page.h */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -509,4 +520,5 @@ void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */
-#endif /* _PPC64_PGTABLE_H */
+#endif /* CONFIG_PPC64 */
+#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 2e36e5a7f4f..36cdc869e58 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -48,8 +48,6 @@ extern void pSeries_final_fixup(void);
extern void pSeries_irq_bus_setup(struct pci_bus *bus);
extern unsigned long pci_probe_only;
-extern unsigned long pci_assign_all_buses;
-extern int pci_read_irq_line(struct pci_dev *pci_dev);
/* ---- EEH internal-use-only related routines ---- */
#ifdef CONFIG_EEH
diff --git a/include/asm-ppc64/spinlock.h b/include/asm-powerpc/spinlock.h
index 7d84fb5e39f..caa4b14e0e9 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -18,31 +18,41 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
-#include <linux/config.h>
+#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
+#endif
+#include <asm/asm-compat.h>
+#include <asm/synch.h>
#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN 1
+#endif
+
/*
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
- unsigned long tmp, tmp2;
+ unsigned long tmp, token;
+ token = LOCK_TOKEN;
__asm__ __volatile__(
-" lwz %1,%3(13) # __spin_trylock\n\
-1: lwarx %0,0,%2\n\
+"1: lwarx %0,0,%2 # __spin_trylock\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
bne- 1b\n\
isync\n\
-2:" : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token))
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&lock->slock)
: "cr0", "memory");
return tmp;
@@ -113,11 +123,17 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory");
+ __asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock"
+ : : :"memory");
lock->slock = 0;
}
+#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+#else
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#endif
/*
* Read-write spinlocks, allowing multiple readers
@@ -133,6 +149,14 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
#define __raw_write_can_lock(rw) (!(rw)->lock)
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND "extsw %0,%0\n"
+#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN (-1)
+#endif
+
/*
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
@@ -142,11 +166,12 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%1 # read_trylock\n\
- extsw %0,%0\n\
- addic. %0,%0,1\n\
- ble- 2f\n\
- stwcx. %0,0,%1\n\
+"1: lwarx %0,0,%1 # read_trylock\n"
+ __DO_SIGN_EXTEND
+" addic. %0,%0,1\n\
+ ble- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
bne- 1b\n\
isync\n\
2:" : "=&r" (tmp)
@@ -162,18 +187,19 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
*/
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
- long tmp, tmp2;
+ long tmp, token;
+ token = WRLOCK_TOKEN;
__asm__ __volatile__(
-" lwz %1,%3(13) # write_trylock\n\
-1: lwarx %0,0,%2\n\
+"1: lwarx %0,0,%2 # write_trylock\n\
cmpwi 0,%0,0\n\
- bne- 2f\n\
- stwcx. %1,0,%2\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %1,0,%2\n\
bne- 1b\n\
isync\n\
-2:" : "=&r" (tmp), "=&r" (tmp2)
- : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
+2:" : "=&r" (tmp)
+ : "r" (token), "r" (&rw->lock)
: "cr0", "memory");
return tmp;
@@ -224,8 +250,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
__asm__ __volatile__(
"eieio # read_unlock\n\
1: lwarx %0,0,%1\n\
- addic %0,%0,-1\n\
- stwcx. %0,0,%1\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
bne- 1b"
: "=&r"(tmp)
: "r"(&rw->lock)
@@ -234,7 +261,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
- __asm__ __volatile__("lwsync # write_unlock": : :"memory");
+ __asm__ __volatile__(SYNC_ON_SMP" # write_unlock"
+ : : :"memory");
rw->lock = 0;
}
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 015d28746e1..db8095cbe09 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -41,6 +41,10 @@ static inline int node_to_first_cpu(int node)
.cache_hot_time = (10*1000000), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
+ .busy_idx = 3, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_EXEC \
| SD_BALANCE_NEWIDLE \
diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h
index 43d2ebbc774..b638b87cebe 100644
--- a/include/asm-ppc/cpm2.h
+++ b/include/asm-ppc/cpm2.h
@@ -1091,5 +1091,7 @@ typedef struct im_idma {
#define CPM_IMMR_OFFSET 0x101a8
#endif
+#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */
+
#endif /* __CPM2__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 2bfdf9c9845..84ac6e258ee 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -545,6 +545,23 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#include <asm/mpc8260_pci9.h>
#endif
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+ invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+ clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+ flush_dcache_range(_start, (_start + _size))
+
+#else
+
+#define dma_cache_inv(_start,_size) do { } while (0)
+#define dma_cache_wback(_start,_size) do { } while (0)
+#define dma_cache_wback_inv(_start,_size) do { } while (0)
+
+#endif
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
deleted file mode 100644
index fb68fa23bea..00000000000
--- a/include/asm-ppc64/dma-mapping.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2004 IBM
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- /* nothing to do */
-}
-
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
- void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *ptr,
- size_t size, enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
- int (*dma_supported)(struct device *dev, u64 mask);
- int (*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
-#endif /* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
deleted file mode 100644
index 42adf7033a8..00000000000
--- a/include/asm-ppc64/imalloc.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _PPC64_IMALLOC_H
-#define _PPC64_IMALLOC_H
-
-/*
- * Define the address range of the imalloc VM area.
- */
-#define PHBS_IO_BASE VMALLOC_END
-#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
-
-
-/* imalloc region types */
-#define IM_REGION_UNUSED 0x1
-#define IM_REGION_SUBSET 0x2
-#define IM_REGION_EXISTS 0x4
-#define IM_REGION_OVERLAP 0x8
-#define IM_REGION_SUPERSET 0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
- int region_type);
-extern void im_free(void *addr);
-
-extern unsigned long ioremap_bot;
-
-#endif /* _PPC64_IMALLOC_H */
diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h
deleted file mode 100644
index b1babb72967..00000000000
--- a/include/asm-ppc64/ptrace-common.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * linux/arch/ppc64/kernel/ptrace-common.h
- *
- * Copyright (c) 2002 Stephen Rothwell, IBM Coproration
- * Extracted from ptrace.c and ptrace32.c
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#ifndef _PPC64_PTRACE_COMMON_H
-#define _PPC64_PTRACE_COMMON_H
-
-#include <linux/config.h>
-#include <asm/system.h>
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1)
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-static inline unsigned long get_reg(struct task_struct *task, int regno)
-{
- unsigned long tmp = 0;
-
- /*
- * Put the correct FP bits in, they might be wrong as a result
- * of our lazy FP restore.
- */
- if (regno == PT_MSR) {
- tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
- tmp |= task->thread.fpexc_mode;
- } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
- tmp = ((unsigned long *)task->thread.regs)[regno];
- }
-
- return tmp;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-static inline int put_reg(struct task_struct *task, int regno,
- unsigned long data)
-{
- if (regno < PT_SOFTE) {
- if (regno == PT_MSR)
- data = (data & MSR_DEBUGCHANGE)
- | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
- ((unsigned long *)task->thread.regs)[regno] = data;
- return 0;
- }
- return -EIO;
-}
-
-static inline void set_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
- if (regs != NULL)
- regs->msr |= MSR_SE;
- set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
-}
-
-static inline void clear_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
- if (regs != NULL)
- regs->msr &= ~MSR_SE;
- clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadword. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the
- * same structures. This also simplifies the implementation of a bi-arch
- * (combined (32- and 64-bit) gdb.
- */
-
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static inline int get_vrregs(unsigned long __user *data,
- struct task_struct *task)
-{
- unsigned long regsize;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_to_user(data, task->thread.vr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_to_user(data, &task->thread.vscr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VRSAVE */
- if (put_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static inline int set_vrregs(struct task_struct *task,
- unsigned long __user *data)
-{
- unsigned long regsize;
-
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_from_user(task->thread.vr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_from_user(&task->thread.vscr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
-
- /* copy VRSAVE */
- if (get_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
-
- return 0;
-}
-#endif
-
-static inline int ptrace_set_debugreg(struct task_struct *task,
- unsigned long addr, unsigned long data)
-{
- /* We only support one DABR and no IABRS at the moment */
- if (addr > 0)
- return -EINVAL;
-
- /* The bottom 3 bits are flags */
- if ((data & ~0x7UL) >= TASK_SIZE)
- return -EIO;
-
- /* Ensure translation is on */
- if (data && !(data & DABR_TRANSLATION))
- return -EIO;
-
- task->thread.dabr = data;
- return 0;
-}
-
-#endif /* _PPC64_PTRACE_COMMON_H */
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 424d5e622b4..6e27f42e3a5 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -10,8 +10,8 @@
typedef struct _cciss_pci_info_struct
{
unsigned char bus;
- unsigned short domain;
unsigned char dev_fn;
+ unsigned short domain;
__u32 board_id;
} cciss_pci_info_struct;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ac8b25fa650..e99019057ba 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1089,9 +1089,11 @@ enum {
/*
* Subdrivers support.
+ *
+ * The gendriver.owner field should be set to the module owner of this driver.
+ * The gendriver.name field should be set to the name of this driver
*/
typedef struct ide_driver_s {
- struct module *owner;
const char *version;
u8 media;
unsigned supports_dsc_overlap : 1;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1013a42d10b..0986d19be0b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);
/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index b93fd8c1d88..cde2f4f4f50 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -1042,7 +1042,7 @@ static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr)
case IEEE80211_4ADDR_LEN:
return ((struct ieee80211_hdr_4addr *)hdr)->payload;
}
-
+ return NULL;
}
static inline int ieee80211_is_ofdm_rate(u8 rate)