From ce07d90aa80a4c9a956eb2f662e4be5ea5c6baf8 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 Nov 2005 14:38:19 +0000
Subject: [ARM] Fix arch-realview/system.h to use __io_address()

Move __io_address to arch-realview/hardware.h, drop core.h from
platsmp.c and localtimer.c, and include asm/io.h where required.

Signed-off-by: Russell King
---
 include/asm-arm/arch-realview/hardware.h | 1 +
 include/asm-arm/arch-realview/system.h   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/asm-arm/arch-realview/hardware.h b/include/asm-arm/arch-realview/hardware.h
index 67879cdb6ef..9ca76dc3a7a 100644
--- a/include/asm-arm/arch-realview/hardware.h
+++ b/include/asm-arm/arch-realview/hardware.h
@@ -27,5 +27,6 @@
 /* macro to get at IO space when running virtually */
 #define IO_ADDRESS(x)		(((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
+#define __io_address(n)		__io(IO_ADDRESS(n))
 
 #endif

diff --git a/include/asm-arm/arch-realview/system.h b/include/asm-arm/arch-realview/system.h
index 9f8fcbca086..6f3d0ce0ca1 100644
--- a/include/asm-arm/arch-realview/system.h
+++ b/include/asm-arm/arch-realview/system.h
@@ -36,7 +36,7 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode)
 {
-	unsigned int hdr_ctrl = (IO_ADDRESS(REALVIEW_SYS_BASE) + REALVIEW_SYS_RESETCTL_OFFSET);
+	void __iomem *hdr_ctrl = __io_address(REALVIEW_SYS_BASE) + REALVIEW_SYS_RESETCTL_OFFSET;
 	unsigned int val;
 
 	/*
--
cgit v1.2.3

From 0a5709b2dc84140082ea235130a05c05d51f94a2 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 Nov 2005 14:51:20 +0000
Subject: [ARM] Include asm/hardware.h instead of asm/arch/hardware.h

Rationalise hardware.h include.

Signed-off-by: Russell King
---
 include/asm-arm/arch-clps711x/uncompress.h | 2 +-
 include/asm-arm/arch-epxa10db/uncompress.h | 2 +-
 include/asm-arm/arch-h720x/uncompress.h    | 2 +-
 include/asm-arm/arch-imx/irqs.h            | 2 +-
 include/asm-arm/arch-imx/timex.h           | 2 +-
 include/asm-arm/arch-integrator/smp.h      | 2 +-
 include/asm-arm/arch-l7200/aux_reg.h       | 2 +-
 include/asm-arm/arch-l7200/gp_timers.h     | 2 +-
 include/asm-arm/arch-omap/gpio.h           | 2 +-
 include/asm-arm/arch-omap/irqs.h           | 2 +-
 include/asm-arm/arch-omap/mcbsp.h          | 2 +-
 include/asm-arm/arch-omap/system.h         | 2 +-
 include/asm-arm/arch-rpc/system.h          | 2 +-
 include/asm-arm/hardware/dec21285.h        | 2 +-
 14 files changed, 14 insertions(+), 14 deletions(-)
(limited to 'include')

diff --git a/include/asm-arm/arch-clps711x/uncompress.h b/include/asm-arm/arch-clps711x/uncompress.h
index 7d0ab791b16..9fc4bcfa168 100644
--- a/include/asm-arm/arch-clps711x/uncompress.h
+++ b/include/asm-arm/arch-clps711x/uncompress.h
@@ -19,7 +19,7 @@
  */
 #include
 #include
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #include
 
 #undef CLPS7111_BASE

diff --git a/include/asm-arm/arch-epxa10db/uncompress.h b/include/asm-arm/arch-epxa10db/uncompress.h
index d33ad6a9374..fdfe0e6848f 100644
--- a/include/asm-arm/arch-epxa10db/uncompress.h
+++ b/include/asm-arm/arch-epxa10db/uncompress.h
@@ -19,7 +19,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include "asm/arch/platform.h"
-#include "asm/arch/hardware.h"
+#include "asm/hardware.h"
 #define UART00_TYPE (volatile unsigned int*)
 #include "asm/arch/uart00.h"
 

diff --git a/include/asm-arm/arch-h720x/uncompress.h b/include/asm-arm/arch-h720x/uncompress.h
index 2fffacf85a0..9535764bcc7 100644
--- a/include/asm-arm/arch-h720x/uncompress.h
+++ b/include/asm-arm/arch-h720x/uncompress.h
@@ -7,7 +7,7 @@
 #ifndef __ASM_ARCH_UNCOMPRESS_H
 #define __ASM_ARCH_UNCOMPRESS_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #define LSR	0x14
 #define TEMPTY	0x40

diff --git a/include/asm-arm/arch-imx/irqs.h b/include/asm-arm/arch-imx/irqs.h
index 238197cfb9d..f195542898e 100644
--- a/include/asm-arm/arch-imx/irqs.h
+++ b/include/asm-arm/arch-imx/irqs.h
@@ -23,7 +23,7 @@
 #define __ARM_IRQS_H__
 
 /* Use the imx definitions */
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 /*
  * IMX Interrupt numbers

diff --git a/include/asm-arm/arch-imx/timex.h b/include/asm-arm/arch-imx/timex.h
index d65ab3cd5d5..8c91674706b 100644
--- a/include/asm-arm/arch-imx/timex.h
+++ b/include/asm-arm/arch-imx/timex.h
@@ -21,7 +21,7 @@
 #ifndef __ASM_ARCH_TIMEX_H
 #define __ASM_ARCH_TIMEX_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #define CLOCK_TICK_RATE		(CLK32)
 #endif

diff --git a/include/asm-arm/arch-integrator/smp.h b/include/asm-arm/arch-integrator/smp.h
index 0ec7093f7c3..da6981efdc3 100644
--- a/include/asm-arm/arch-integrator/smp.h
+++ b/include/asm-arm/arch-integrator/smp.h
@@ -3,7 +3,7 @@
 
 #include
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #include
 
 #define hard_smp_processor_id()			\

diff --git a/include/asm-arm/arch-l7200/aux_reg.h b/include/asm-arm/arch-l7200/aux_reg.h
index 762cbc76c50..5b4396de16a 100644
--- a/include/asm-arm/arch-l7200/aux_reg.h
+++ b/include/asm-arm/arch-l7200/aux_reg.h
@@ -9,7 +9,7 @@
 #ifndef _ASM_ARCH_AUXREG_H
 #define _ASM_ARCH_AUXREG_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #define l7200aux_reg	*((volatile unsigned int *) (AUX_BASE))
 

diff --git a/include/asm-arm/arch-l7200/gp_timers.h b/include/asm-arm/arch-l7200/gp_timers.h
index 6f20962df24..9c4804d1357 100644
--- a/include/asm-arm/arch-l7200/gp_timers.h
+++ b/include/asm-arm/arch-l7200/gp_timers.h
@@ -10,7 +10,7 @@
 #ifndef _ASM_ARCH_GPTIMERS_H
 #define _ASM_ARCH_GPTIMERS_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 /*
  * Layout of L7200 general purpose timer registers

diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h
index 1b3885741ac..f486b72070e 100644
--- a/include/asm-arm/arch-omap/gpio.h
+++ b/include/asm-arm/arch-omap/gpio.h
@@ -26,7 +26,7 @@
 #ifndef __ASM_ARCH_OMAP_GPIO_H
 #define __ASM_ARCH_OMAP_GPIO_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #include
 #include
 

diff --git a/include/asm-arm/arch-omap/irqs.h b/include/asm-arm/arch-omap/irqs.h
index 9779686bdce..4ffce1d7775 100644
--- a/include/asm-arm/arch-omap/irqs.h
+++ b/include/asm-arm/arch-omap/irqs.h
@@ -260,7 +260,7 @@ extern void omap_init_irq(void);
  * The definition of NR_IRQS is in board-specific header file, which is
  * included via hardware.h
  */
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #ifndef NR_IRQS
 #define NR_IRQS                 IH_BOARD_BASE

diff --git a/include/asm-arm/arch-omap/mcbsp.h b/include/asm-arm/arch-omap/mcbsp.h
index 305bdeb16ab..e79d98ab2ab 100644
--- a/include/asm-arm/arch-omap/mcbsp.h
+++ b/include/asm-arm/arch-omap/mcbsp.h
@@ -24,7 +24,7 @@
 #ifndef __ASM_ARCH_OMAP_MCBSP_H
 #define __ASM_ARCH_OMAP_MCBSP_H
 
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 
 #define OMAP730_MCBSP1_BASE	0xfffb1000
 #define OMAP730_MCBSP2_BASE	0xfffb1800

diff --git a/include/asm-arm/arch-omap/system.h b/include/asm-arm/arch-omap/system.h
index b43cdd2a387..9af415d2944 100644
--- a/include/asm-arm/arch-omap/system.h
+++ b/include/asm-arm/arch-omap/system.h
@@ -7,7 +7,7 @@
 #include
 #include
 #include
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #include
 
 #ifndef CONFIG_MACH_VOICEBLUE

diff --git a/include/asm-arm/arch-rpc/system.h b/include/asm-arm/arch-rpc/system.h
index ca3277d1d5e..729c2ae4b51 100644
--- a/include/asm-arm/arch-rpc/system.h
+++ b/include/asm-arm/arch-rpc/system.h
@@ -7,7 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #include
 #include
 

diff --git a/include/asm-arm/hardware/dec21285.h b/include/asm-arm/hardware/dec21285.h
index 9049f0ddaec..6685e3fb97b 100644
--- a/include/asm-arm/hardware/dec21285.h
+++ b/include/asm-arm/hardware/dec21285.h
@@ -20,7 +20,7 @@
 #include
 
 #ifndef __ASSEMBLY__
-#include <asm/arch/hardware.h>
+#include <asm/hardware.h>
 #define DC21285_IO(x)		((volatile unsigned long *)(ARMCSR_BASE+(x)))
 #else
 #define DC21285_IO(x)		(x)
--
cgit v1.2.3

From a7d068336197945dc4af65c5973c996e526d51cb Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Wed, 16 Nov 2005 15:05:11 +0000
Subject: [ARM] 3165/1: fix atomic_cmpxchg() implementation for ARMv6+

Patch from Nicolas Pitre

If 'old' and 'oldval' are different then 'res' never gets set.  In that
case, if ever %0 happened to contain anything but zero (rather likely)
then the code will loop forever (or until another CPU just come along
and change the atomic value to match 'old' which is rather unlikely).

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 include/asm-arm/atomic.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include')

diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 75b80271972..5f827509e92 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -87,6 +87,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
 		"ldrex	%1, [%2]\n"
+		"mov	%0, #0\n"
 		"teq	%1, %3\n"
 		"strexeq %0, %4, [%2]\n"
 		    : "=&r" (res), "=&r" (oldval)
--
cgit v1.2.3
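The fix above is easiest to see in C. Here is an illustrative sketch of what
the patched assembly does, using hypothetical ldrex()/strex() helpers standing
in for the ARM exclusive-access instructions (this is not kernel code):

	int oldval, res;

	do {
		res = 0;                           /* the added "mov %0, #0" */
		oldval = ldrex(&ptr->counter);     /* "ldrex %1, [%2]" */
		if (oldval == old)                 /* "teq %1, %3" */
			res = strex(&ptr->counter, new); /* "strexeq": 0 on success */
	} while (res);                             /* retry until the store lands */

	return oldval;

Without the added initialisation, the oldval != old path leaves res holding
whatever was in %0 before, so the while (res) test can spin forever, exactly
as the commit message describes.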
From 8dc39b883e9497445b53c498be7493c3e43af006 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 Nov 2005 17:23:57 +0000
Subject: [ARM] Add linux/compiler.h includes where required

atomic.h, bitops.h and mmu_context.h are using likely/unlikely.
thread_info.h uses __attribute_const__.  Hence these files require
linux/compiler.h to be included.

Signed-off-by: Russell King
---
 include/asm-arm/atomic.h      | 1 +
 include/asm-arm/bitops.h      | 1 +
 include/asm-arm/mmu_context.h | 1 +
 include/asm-arm/thread_info.h | 1 +
 4 files changed, 4 insertions(+)
(limited to 'include')

diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 5f827509e92..d8ba0a9eb08 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include
+#include <linux/compiler.h>
 
 typedef struct { volatile int counter; } atomic_t;

diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index e007dd990da..7399d431edf 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -19,6 +19,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/compiler.h>
 #include
 
 #define smp_mb__before_clear_bit()	mb()

diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 3d4b810d8c3..81c59facea3 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -13,6 +13,7 @@
 #ifndef __ASM_ARM_MMU_CONTEXT_H
 #define __ASM_ARM_MMU_CONTEXT_H
 
+#include <linux/compiler.h>
 #include
 #include
 

diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index 8252a4cd860..7c98557b717 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -12,6 +12,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/compiler.h>
 #include
 
 #define THREAD_SIZE_ORDER	1
--
cgit v1.2.3

From 1b12050f17460dc312cfd8cc59c79e181b23062b Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 Nov 2005 17:38:40 +0000
Subject: [ARM] Move zone adjustment for SA1111 on SA11x0 platforms

Unfortunately, using PAGE_SHIFT in asm/arch/memory.h is unsafe, and we
can't include asm/page.h into this file because then we have a circular
dependency.  Move the offending code to arch/arm/common/sa1111.c
instead.

Signed-off-by: Russell King
---
 include/asm-arm/arch-sa1100/memory.h | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)
(limited to 'include')

diff --git a/include/asm-arm/arch-sa1100/memory.h b/include/asm-arm/arch-sa1100/memory.h
index 0fc555b4c91..018a9f0e398 100644
--- a/include/asm-arm/arch-sa1100/memory.h
+++ b/include/asm-arm/arch-sa1100/memory.h
@@ -18,20 +18,10 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_SA1111
-static inline void
-__arch_adjust_zones(int node, unsigned long *size, unsigned long *holes)
-{
-	unsigned int sz = SZ_1M >> PAGE_SHIFT;
-
-	if (node != 0)
-		sz = 0;
-
-	size[1] = size[0] - sz;
-	size[0] = sz;
-}
+void sa1111_adjust_zones(int node, unsigned long *size, unsigned long *holes);
 
 #define arch_adjust_zones(node, size, holes) \
-	__arch_adjust_zones(node, size, holes)
+	sa1111_adjust_zones(node, size, holes)
 
 #define ISA_DMA_THRESHOLD	(PHYS_OFFSET + SZ_1M - 1)
 
--
cgit v1.2.3
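For reference, the body moved to arch/arm/common/sa1111.c lies outside this
'include'-limited view, but it is expected to be essentially the deleted
inline above, now out of line where PAGE_SHIFT is safely available; roughly:

	void sa1111_adjust_zones(int node, unsigned long *size, unsigned long *holes)
	{
		unsigned int sz = SZ_1M >> PAGE_SHIFT;

		if (node != 0)
			sz = 0;

		/* keep only the first 1MB in zone 0 for ISA-style DMA */
		size[1] = size[0] - sz;
		size[0] = sz;
	}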
From 49ee57a3295a227b6a02785f75ccd521e493e983 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 Nov 2005 18:03:10 +0000
Subject: [ARM] Use unsigned long not u32 in atomic_cmpxchg

Since atomic.h does not include types.h, u32 may not be defined.
Since atomics are supposed to work on unsigned long quantities,
use unsigned long instead.

Signed-off-by: Russell King
---
 include/asm-arm/atomic.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index d8ba0a9eb08..d586f65c822 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -83,7 +83,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	u32 oldval, res;
+	unsigned long oldval, res;
 
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
--
cgit v1.2.3

From 5470dc656820fb67c0a2e352f0aaa48b86c19026 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 16 Nov 2005 18:36:49 +0000
Subject: [ARM] No need to include asm/proc-fns.h into asm/system.h

In the old days when arm26/arm32 was combined into the same
architecture, proc-fns.h provided the xchg implementation for arm26
CPUs.  Since we no longer combine these two, this include is no longer
required.  Remove it.

Signed-off-by: Russell King
---
 include/asm-arm/system.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 8efa4ebdcac..5621d61ebc0 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -93,8 +93,6 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 				       struct pt_regs *),
 		     int sig, const char *name);
 
-#include <asm/proc-fns.h>
-
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
@@ -102,6 +100,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
 
 extern asmlinkage void __backtrace(void);
 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
 extern void show_pte(struct mm_struct *mm, unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 
--
cgit v1.2.3

From 5cfccd7f132432dd4705444a44b51d12ef88a85f Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Wed, 16 Nov 2005 23:37:53 +1100
Subject: [PATCH] powerpc: Fix database regression due to scheduler changes

PowerPC's NUMA domain doesn't currently set up some of the newer
sched-domains parameters.  Brian Twichell discovered and diagnosed a
1.5% OLTP database regression on a 4 core POWER5 system that was due
to the use of NUMA scheduling on ppc64.

This patch applies some saneish values to the parameters, in line with
other architectures.  This solves the regression.

Signed-off-by: Nick Piggin
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/topology.h | 4 ++++
 1 file changed, 4 insertions(+)
(limited to 'include')

diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 015d28746e1..0e8c20c9606 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -41,6 +41,10 @@ static inline int node_to_first_cpu(int node)
 	.cache_hot_time		= (10*1000000),			\
 	.cache_nice_tries	= 1,				\
 	.per_cpu_gain		= 100,				\
+	.busy_idx		= 3,				\
+	.idle_id		= 1,				\
+	.newidle_idx		= 2,				\
+	.wake_idx		= 1,				\
 	.flags			= SD_LOAD_BALANCE		\
 				| SD_BALANCE_EXEC		\
 				| SD_BALANCE_NEWIDLE		\
--
cgit v1.2.3

From 1e28a7ddd3e713384e9c6768e7c502031dc205e2 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Thu, 17 Nov 2005 00:44:03 +0000
Subject: [PATCH] Avoid use of uninitialised spinlock in EEH.

If the kernel supports both G5 and pSeries, and CONFIG_EEH is enabled,
eeh_init() is (quite reasonably) never called when we boot on a G5.
Yet eeh_check_failure() still gets called.  We should avoid doing that
if !eeh_subsystem_enabled.

Signed-off-by: David Woodhouse
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/eeh.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index 89f26ab3190..f8633aafe4b 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -30,6 +30,8 @@ struct device_node;
 
 #ifdef CONFIG_EEH
 
+extern int eeh_subsystem_enabled;
+
 /* Values for eeh_mode bits in device_node */
 #define EEH_MODE_SUPPORTED	(1<<0)
 #define EEH_MODE_NOCHECK	(1<<1)
@@ -75,7 +77,7 @@ void eeh_remove_device(struct pci_dev *);
  * If this macro yields TRUE, the caller relays to eeh_check_failure()
  * which does further tests out of line.
  */
-#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0)
+#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_subsystem_enabled)
 
 /*
  * Reads from a device which has been isolated by EEH will return
--
cgit v1.2.3

From db7f6861822c80f17a23647b4d0042dcc56e2024 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Tue, 8 Nov 2005 22:23:13 +0000
Subject: [MIPS] Delete duplicate definitions of break codes.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/signal.h | 21 ---------------------
 1 file changed, 21 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h
index 8ca539e80d8..6fe903e09c6 100644
--- a/include/asm-mips/signal.h
+++ b/include/asm-mips/signal.h
@@ -155,27 +155,6 @@ typedef struct sigaltstack {
 #ifdef __KERNEL__
 #include
 
-/*
- * The following break codes are or were in use for specific purposes in
- * other MIPS operating systems.  Linux/MIPS doesn't use all of them.  The
- * unused ones are here as placeholders; we might encounter them in
- * non-Linux/MIPS object files or make use of them in the future.
- */
-#define BRK_USERBP	0	/* User bp (used by debuggers) */
-#define BRK_KERNELBP	1	/* Break in the kernel */
-#define BRK_ABORT	2	/* Sometimes used by abort(3) to SIGIOT */
-#define BRK_BD_TAKEN	3	/* For bd slot emulation - not implemented */
-#define BRK_BD_NOTTAKEN	4	/* For bd slot emulation - not implemented */
-#define BRK_SSTEPBP	5	/* User bp (used by debuggers) */
-#define BRK_OVERFLOW	6	/* Overflow check */
-#define BRK_DIVZERO	7	/* Divide by zero check */
-#define BRK_RANGE	8	/* Range error check */
-#define BRK_STACKOVERFLOW 9	/* For Ada stackchecking */
-#define BRK_NORLD	10	/* No rld found - not used by Linux/MIPS */
-#define _BRK_THREADBP	11	/* For threads, user bp (used by debuggers) */
-#define BRK_MULOVF	1023	/* Multiply overflow */
-#define BRK_BUG		512	/* Used by BUG() */
-
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
 
 #endif /* __KERNEL__ */
--
cgit v1.2.3

From 6f17ce33fef3fd84e3e45850c9388d118adfad96 Mon Sep 17 00:00:00 2001
From: Yoichi Yuasa
Date: Thu, 10 Nov 2005 22:42:36 +0900
Subject: Add GT64111 PCI ID back

Signed-off-by: Yoichi Yuasa
Signed-off-by: Ralf Baechle
---
 include/linux/pci_ids.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include')

diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d4c1c8fd292..7b387faedb4 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1401,6 +1401,7 @@
 #define PCI_SUBDEVICE_ID_KEYSPAN_SX2	0x5334
 
 #define PCI_VENDOR_ID_MARVELL		0x11ab
+#define PCI_DEVICE_ID_MARVELL_GT64111	0x4146
 #define PCI_DEVICE_ID_MARVELL_GT64260	0x6430
 #define PCI_DEVICE_ID_MARVELL_MV64360	0x6460
 #define PCI_DEVICE_ID_MARVELL_MV64460	0x6480
--
cgit v1.2.3
From bdc3c3c7cbc3e1244c03640b4b372d097a1dacf3 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Nov 2005 16:23:42 +0000
Subject: [MIPS] Add missing arch defines for the Alchemy MTD driver.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/mach-db1x00/db1200.h | 3 +++
 include/asm-mips/mach-db1x00/db1x00.h | 6 ++++++
 include/asm-mips/mach-pb1x00/pb1200.h | 3 +++
 include/asm-mips/mach-pb1x00/pb1550.h | 7 +++++++
 4 files changed, 19 insertions(+)
(limited to 'include')

diff --git a/include/asm-mips/mach-db1x00/db1200.h b/include/asm-mips/mach-db1x00/db1200.h
index 5d894376fc1..647fdb54cc1 100644
--- a/include/asm-mips/mach-db1x00/db1200.h
+++ b/include/asm-mips/mach-db1x00/db1200.h
@@ -220,5 +220,8 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
 #define BOARD_PC1_INT		DB1200_PC1_INT
 #define BOARD_CARD_INSERTED(SOCKET) bcsr->sig_status & (1<<(8+(2*SOCKET)))
 
+/* Nand chip select */
+#define NAND_CS 1
+
 #endif /* __ASM_DB1200_H */
 

diff --git a/include/asm-mips/mach-db1x00/db1x00.h b/include/asm-mips/mach-db1x00/db1x00.h
index efafe65258b..7b28b23f91c 100644
--- a/include/asm-mips/mach-db1x00/db1x00.h
+++ b/include/asm-mips/mach-db1x00/db1x00.h
@@ -200,6 +200,12 @@ typedef volatile struct
   ((NAND_T_PUL & 0xF) << NAND_T_PUL_SHIFT) | \
   ((NAND_T_SU  & 0xF) << NAND_T_SU_SHIFT)  | \
   ((NAND_T_WH  & 0xF) << NAND_T_WH_SHIFT)
+#define NAND_CS 1
+
+/* should be done by yamon */
+#define NAND_STCFG  0x00400005 /* 8-bit NAND */
+#define NAND_STTIME 0x00007774 /* valid for 396MHz SD=2 only */
+#define NAND_STADDR 0x12000FFF /* physical address 0x20000000 */
 
 #endif /* __ASM_DB1X00_H */
 

diff --git a/include/asm-mips/mach-pb1x00/pb1200.h b/include/asm-mips/mach-pb1x00/pb1200.h
index 9a3088b19bf..409d443322c 100644
--- a/include/asm-mips/mach-pb1x00/pb1200.h
+++ b/include/asm-mips/mach-pb1x00/pb1200.h
@@ -248,5 +248,8 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
 #define BOARD_PC1_INT		PB1200_PC1_INT
 #define BOARD_CARD_INSERTED(SOCKET) bcsr->sig_status & (1<<(8+(2*SOCKET)))
 
+/* Nand chip select */
+#define NAND_CS 1
+
 #endif /* __ASM_PB1200_H */
 

diff --git a/include/asm-mips/mach-pb1x00/pb1550.h b/include/asm-mips/mach-pb1x00/pb1550.h
index 431d6088ea9..9578ead11e8 100644
--- a/include/asm-mips/mach-pb1x00/pb1550.h
+++ b/include/asm-mips/mach-pb1x00/pb1550.h
@@ -166,4 +166,11 @@ static BCSR * const bcsr = (BCSR *)BCSR_PHYS_ADDR;
   ((NAND_T_SU  & 0xF) << NAND_T_SU_SHIFT)  | \
   ((NAND_T_WH  & 0xF) << NAND_T_WH_SHIFT)
 
+#define NAND_CS 1
+
+/* should be done by yamon */
+#define NAND_STCFG  0x00400005 /* 8-bit NAND */
+#define NAND_STTIME 0x00007774 /* valid for 396MHz SD=2 only */
+#define NAND_STADDR 0x12000FFF /* physical address 0x20000000 */
+
 #endif /* __ASM_PB1550_H */
--
cgit v1.2.3

From 443bf3292f04c53e92bf0588f1aa2c9b421545e2 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Nov 2005 16:23:42 +0000
Subject: Add definitions for the Dallas DS17287 RTC.

Signed-off-by: Ralf Baechle
---
 include/linux/ds17287rtc.h | 67 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 include/linux/ds17287rtc.h
(limited to 'include')

diff --git a/include/linux/ds17287rtc.h b/include/linux/ds17287rtc.h
new file mode 100644
index 00000000000..c281ba42e28
--- /dev/null
+++ b/include/linux/ds17287rtc.h
@@ -0,0 +1,67 @@
+/*
+ * ds17287rtc.h - register definitions for the ds1728[57] RTC / CMOS RAM
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2003 Guido Guenther
+ */
+#ifndef __LINUX_DS17287RTC_H
+#define __LINUX_DS17287RTC_H
+
+#include <linux/rtc.h>			/* get the user-level API */
+#include <linux/spinlock.h>		/* spinlock_t */
+#include
+
+/* Register A */
+#define DS_REGA_DV2	0x40		/* countdown chain */
+#define DS_REGA_DV1	0x20		/* oscillator enable */
+#define DS_REGA_DV0	0x10		/* bank select */
+
+/* bank 1 registers */
+#define DS_B1_MODEL	0x40		/* model number byte */
+#define DS_B1_SN1	0x41		/* serial number byte 1 */
+#define DS_B1_SN2	0x42		/* serial number byte 2 */
+#define DS_B1_SN3	0x43		/* serial number byte 3 */
+#define DS_B1_SN4	0x44		/* serial number byte 4 */
+#define DS_B1_SN5	0x45		/* serial number byte 5 */
+#define DS_B1_SN6	0x46		/* serial number byte 6 */
+#define DS_B1_CRC	0x47		/* CRC byte */
+#define DS_B1_CENTURY	0x48		/* Century byte */
+#define DS_B1_DALARM	0x49		/* date alarm */
+#define DS_B1_XCTRL4A	0x4a		/* extendec control register 4a */
+#define DS_B1_XCTRL4B	0x4b		/* extendec control register 4b */
+#define DS_B1_RTCADDR2	0x4e		/* rtc address 2 */
+#define DS_B1_RTCADDR3	0x4f		/* rtc address 3 */
+#define DS_B1_RAMLSB	0x50		/* extended ram LSB */
+#define DS_B1_RAMMSB	0x51		/* extended ram MSB */
+#define DS_B1_RAMDPORT	0x53		/* extended ram data port */
+
+/* register details */
+/* extended control register 4a */
+#define DS_XCTRL4A_VRT2	0x80		/* valid ram and time */
+#define DS_XCTRL4A_INCR	0x40		/* increment progress status */
+#define DS_XCTRL4A_BME	0x20		/* burst mode enable */
+#define DS_XCTRL4A_PAB	0x08		/* power active bar ctrl */
+#define DS_XCTRL4A_RF	0x04		/* ram clear flag */
+#define DS_XCTRL4A_WF	0x02		/* wake up alarm flag */
+#define DS_XCTRL4A_KF	0x01		/* kickstart flag */
+
+/* interrupt causes */
+#define DS_XCTRL4A_IFS	(DS_XCTRL4A_RF|DS_XCTRL4A_WF|DS_XCTRL4A_KF)
+
+/* extended control register 4b */
+#define DS_XCTRL4B_ABE	0x80		/* auxiliary battery enable */
+#define DS_XCTRL4B_E32K	0x40		/* enable 32.768 kHz Output */
+#define DS_XCTRL4B_CS	0x20		/* crystal select */
+#define DS_XCTRL4B_RCE	0x10		/* ram clear enable */
+#define DS_XCTRL4B_PRS	0x08		/* PAB resec select */
+#define DS_XCTRL4B_RIE	0x04		/* ram clear interrupt enable */
+#define DS_XCTRL4B_WFE	0x02		/* wake up alarm interrupt enable */
+#define DS_XCTRL4B_KFE	0x01		/* kickstart interrupt enable */
+
+/* interrupt enable bits */
+#define DS_XCTRL4B_IFES	(DS_XCTRL4B_RIE|DS_XCTRL4B_WFE|DS_XCTRL4B_KFE)
+
+#endif /* __LINUX_DS17287RTC_H */
--
cgit v1.2.3

From cd017fbdd33f2d8294b0e0324faa1dc7750b4af0 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Nov 2005 16:23:43 +0000
Subject: Add definitions for the Dallas DS1742 RTC / non-volatile memory.

Signed-off-by: Ralf Baechle
---
 include/linux/ds1742rtc.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 include/linux/ds1742rtc.h
(limited to 'include')

diff --git a/include/linux/ds1742rtc.h b/include/linux/ds1742rtc.h
new file mode 100644
index 00000000000..a83cdd1cafc
--- /dev/null
+++ b/include/linux/ds1742rtc.h
@@ -0,0 +1,53 @@
+/*
+ * ds1742rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
+ *
+ * Copyright (C) 1999-2001 Toshiba Corporation
+ * Copyright (C) 2003 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Permission is hereby granted to copy, modify and redistribute this code
+ * in terms of the GNU Library General Public License, Version 2 or later,
+ * at your option.
+ */
+#ifndef __LINUX_DS1742RTC_H
+#define __LINUX_DS1742RTC_H
+
+#include
+
+#define RTC_BRAM_SIZE		0x800
+#define RTC_OFFSET		0x7f8
+
+/*
+ * Register summary
+ */
+#define RTC_CONTROL		(RTC_OFFSET + 0)
+#define RTC_CENTURY		(RTC_OFFSET + 0)
+#define RTC_SECONDS		(RTC_OFFSET + 1)
+#define RTC_MINUTES		(RTC_OFFSET + 2)
+#define RTC_HOURS		(RTC_OFFSET + 3)
+#define RTC_DAY			(RTC_OFFSET + 4)
+#define RTC_DATE		(RTC_OFFSET + 5)
+#define RTC_MONTH		(RTC_OFFSET + 6)
+#define RTC_YEAR		(RTC_OFFSET + 7)
+
+#define RTC_CENTURY_MASK	0x3f
+#define RTC_SECONDS_MASK	0x7f
+#define RTC_DAY_MASK		0x07
+
+/*
+ * Bits in the Control/Century register
+ */
+#define RTC_WRITE		0x80
+#define RTC_READ		0x40
+
+/*
+ * Bits in the Seconds register
+ */
+#define RTC_STOP		0x80
+
+/*
+ * Bits in the Day register
+ */
+#define RTC_BATT_FLAG		0x80
+#define RTC_FREQ_TEST		0x40
+
+#endif /* __LINUX_DS1742RTC_H */
--
cgit v1.2.3

From 16212017a54afdb702ecc796aaa0448b795de03b Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 17 Nov 2005 16:23:44 +0000
Subject: [MIPS] IP32: No need to include <linux/mc146818rtc.h>.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/mach-ip32/mc146818rtc.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/mach-ip32/mc146818rtc.h b/include/asm-mips/mach-ip32/mc146818rtc.h
index f5d780ff843..c28ba8d8407 100644
--- a/include/asm-mips/mach-ip32/mc146818rtc.h
+++ b/include/asm-mips/mach-ip32/mc146818rtc.h
@@ -11,7 +11,6 @@
 #ifndef __ASM_MACH_IP32_MC146818RTC_H
 #define __ASM_MACH_IP32_MC146818RTC_H
 
-#include <linux/mc146818rtc.h>
 #include <asm/ip32/mace.h>
 
 #define RTC_PORT(x)	(0x70 + (x))
@@ -26,8 +25,10 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
 	mace->isa.rtc[addr << 8] = data;
 }
 
-/* FIXME: Do it right. For now just assume that noone lives in 20th century
- * and no O2 user in 22th century ;-) */
+/*
+ * FIXME: Do it right. For now just assume that noone lives in 20th century
+ * and no O2 user in 22th century ;-)
+ */
 #define mc146818_decode_year(year) ((year) + 2000)
 
 #define RTC_ALWAYS_BCD	0
--
cgit v1.2.3

From efd9412d850397fc129c17eb33c84f74abb0d3ee Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Fri, 11 Nov 2005 11:46:25 +0000
Subject: [MIPS] JMR3927: Undo accidental rename.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/mach-jmr3927/asm/ds1742.h | 16 ----------------
 include/asm-mips/mach-jmr3927/ds1742.h     | 16 ++++++++++++++++
 2 files changed, 16 insertions(+), 16 deletions(-)
 delete mode 100644 include/asm-mips/mach-jmr3927/asm/ds1742.h
 create mode 100644 include/asm-mips/mach-jmr3927/ds1742.h
(limited to 'include')

diff --git a/include/asm-mips/mach-jmr3927/asm/ds1742.h b/include/asm-mips/mach-jmr3927/asm/ds1742.h
deleted file mode 100644
index 134a4b6c334..00000000000
--- a/include/asm-mips/mach-jmr3927/asm/ds1742.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003 by Ralf Baechle
- */
-#ifndef __ASM_MACH_JMR3927_ASM_DS1742_H
-#define __ASM_MACH_JMR3927_ASM_DS1742_H
-
-#include
-
-#define rtc_read(reg)		(jmr3927_nvram_in(addr))
-#define rtc_write(data, reg)	(jmr3927_nvram_out((data),(reg)))
-
-#endif /* __ASM_MACH_JMR3927_ASM_DS1742_H */

diff --git a/include/asm-mips/mach-jmr3927/ds1742.h b/include/asm-mips/mach-jmr3927/ds1742.h
new file mode 100644
index 00000000000..134a4b6c334
--- /dev/null
+++ b/include/asm-mips/mach-jmr3927/ds1742.h
@@ -0,0 +1,16 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 by Ralf Baechle
+ */
+#ifndef __ASM_MACH_JMR3927_ASM_DS1742_H
+#define __ASM_MACH_JMR3927_ASM_DS1742_H
+
+#include
+
+#define rtc_read(reg)		(jmr3927_nvram_in(addr))
+#define rtc_write(data, reg)	(jmr3927_nvram_out((data),(reg)))
+
+#endif /* __ASM_MACH_JMR3927_ASM_DS1742_H */
--
cgit v1.2.3

From 84c493d8e143360cfba3efede97e5a93d62c4d3d Mon Sep 17 00:00:00 2001
From: Arnaud Giersch
Date: Sun, 13 Nov 2005 00:38:18 +0100
Subject: [MIPS] IP32 Fix and complete IP32 parport definitions

Fix, complete, and indent IP32 parport definitions.  Definitions were
wrong for CTXINUSE and DMACTIVE (1-bit shift).  Add macros DATA_BOUND,
DATALEN_SHIFT, and CTRSHIFT.

Signed-off-by: Arnaud Giersch
Signed-off-by: Ralf Baechle
---
 include/asm-mips/ip32/mace.h | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/ip32/mace.h b/include/asm-mips/ip32/mace.h
index 5bdc51d85b6..e514efe44c0 100644
--- a/include/asm-mips/ip32/mace.h
+++ b/include/asm-mips/ip32/mace.h
@@ -150,24 +150,34 @@ struct mace_audio {
 
 /* register definitions for parallel port DMA */
 struct mace_parport {
-/* 0 - do nothing, 1 - pulse terminal count to the device after buffer is drained */
-#define MACEPAR_CONTEXT_LASTFLAG	BIT(63)
-/* Should not cross 4K page boundary */
-#define MACEPAR_CONTEXT_DATALEN_MASK	0xfff00000000
-/* Can be arbitrarily aligned on any byte boundary on output, 64 byte aligned on input */
-#define MACEPAR_CONTEXT_BASEADDR_MASK	0xffffffff
+	/* 0 - do nothing,
+	 * 1 - pulse terminal count to the device after buffer is drained */
+#define MACEPAR_CONTEXT_LASTFLAG	BIT(63)
+	/* Should not cross 4K page boundary */
+#define MACEPAR_CONTEXT_DATA_BOUND	0x0000000000001000UL
+#define MACEPAR_CONTEXT_DATALEN_MASK	0x00000fff00000000UL
+#define MACEPAR_CONTEXT_DATALEN_SHIFT	32
+	/* Can be arbitrarily aligned on any byte boundary on output,
+	 * 64 byte aligned on input */
+#define MACEPAR_CONTEXT_BASEADDR_MASK	0x00000000ffffffffUL
 	volatile u64 context_a;
 	volatile u64 context_b;
-#define MACEPAR_CTLSTAT_DIRECTION	BIT(0) /* 0 - mem->device, 1 - device->mem */
-#define MACEPAR_CTLSTAT_ENABLE		BIT(1) /* 0 - channel frozen, 1 - channel enabled */
-#define MACEPAR_CTLSTAT_RESET		BIT(2) /* 0 - channel active, 1 - complete channel reset */
-#define MACEPAR_CTLSTAT_CTXB_VALID	BIT(3)
-#define MACEPAR_CTLSTAT_CTXA_VALID	BIT(4)
-	volatile u64 cntlstat;		/* Control/Status register */
-#define MACEPAR_DIAG_CTXINUSE		BIT(1)
-#define MACEPAR_DIAG_DMACTIVE		BIT(2) /* 1 - Dma engine is enabled and processing something */
-#define MACEPAR_DIAG_CTRMASK		0x3ffc /* Counter of bytes left */
-	volatile u64 diagnostic;	/* RO: diagnostic register */
+	/* 0 - mem->device, 1 - device->mem */
+#define MACEPAR_CTLSTAT_DIRECTION	BIT(0)
+	/* 0 - channel frozen, 1 - channel enabled */
+#define MACEPAR_CTLSTAT_ENABLE		BIT(1)
+	/* 0 - channel active, 1 - complete channel reset */
+#define MACEPAR_CTLSTAT_RESET		BIT(2)
+#define MACEPAR_CTLSTAT_CTXB_VALID	BIT(3)
+#define MACEPAR_CTLSTAT_CTXA_VALID	BIT(4)
+	volatile u64 cntlstat;		/* Control/Status register */
+#define MACEPAR_DIAG_CTXINUSE		BIT(0)
+	/* 1 - Dma engine is enabled and processing something */
+#define MACEPAR_DIAG_DMACTIVE		BIT(1)
+	/* Counter of bytes left */
+#define MACEPAR_DIAG_CTRMASK		0x0000000000003ffcUL
+#define MACEPAR_DIAG_CTRSHIFT		2
+	volatile u64 diagnostic;	/* RO: diagnostic register */
 };
 
 /* ISA Control and DMA registers */
--
cgit v1.2.3

From 59f145d28ce853b13dafdfab438c48f3ead0b38e Mon Sep 17 00:00:00 2001
From: Arnaud Giersch
Date: Sun, 13 Nov 2005 00:38:18 +0100
Subject: [MIPS] IP32: Fix sparse warnings.

Add __iomem qualifier to crime and mace pointers.

Signed-off-by: Arnaud Giersch
Signed-off-by: Ralf Baechle
---
 include/asm-mips/ip32/crime.h | 2 +-
 include/asm-mips/ip32/mace.h  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/ip32/crime.h b/include/asm-mips/ip32/crime.h
index 152879bae20..a13702fafa8 100644
--- a/include/asm-mips/ip32/crime.h
+++ b/include/asm-mips/ip32/crime.h
@@ -154,7 +154,7 @@ struct sgi_crime {
 #define CRIME_MEM_ERROR_ECC_REPL_MASK	0xffffffff
 };
 
-extern struct sgi_crime *crime;
+extern struct sgi_crime __iomem *crime;
 
 #define CRIME_HI_MEM_BASE	0x40000000	/* this is where whole 1G of RAM is mapped */
 

diff --git a/include/asm-mips/ip32/mace.h b/include/asm-mips/ip32/mace.h
index e514efe44c0..990082c81f3 100644
--- a/include/asm-mips/ip32/mace.h
+++ b/include/asm-mips/ip32/mace.h
@@ -363,6 +363,6 @@ struct sgi_mace {
 	char _pad6[0x80000 - sizeof(struct mace_isa)];
 };
 
-extern struct sgi_mace *mace;
+extern struct sgi_mace __iomem *mace;
 
 #endif /* __ASM_MACE_H__ */
--
cgit v1.2.3

From 99289a4e8a9cb3fa6caa8fc4ebf57a33db497340 Mon Sep 17 00:00:00 2001
From: Arnaud Giersch
Date: Sun, 13 Nov 2005 00:38:18 +0100
Subject: [MIPS] Add const qualifier to writes##bwlq.

Add const qualifier to parameter addr of writes##bwlq.

Signed-off-by: Arnaud Giersch
Signed-off-by: Ralf Baechle
---
 include/asm-mips/io.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 3061870b7f6..d42685747e7 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -459,10 +459,10 @@ __BUILDIO(q, u64)
 
 #define __BUILD_MEMORY_STRING(bwlq, type)				\
 									\
-static inline void writes##bwlq(volatile void __iomem *mem, void *addr, \
-				unsigned int count)			\
+static inline void writes##bwlq(volatile void __iomem *mem,		\
+				const void *addr, unsigned int count)	\
 {									\
-	volatile type *__addr = addr;					\
+	const volatile type *__addr = addr;				\
 									\
 	while (count--) {						\
 		mem_write##bwlq(*__addr, mem);				\
--
cgit v1.2.3
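One practical effect of the const qualifier added above: callers can now hand
read-only buffers to the generated string writers (writesb, writesw, writesl)
without a cast or a compiler warning. An illustrative, not in-tree, use,
assuming some __iomem port pointer:

	static const u8 init_seq[] = { 0x55, 0xaa, 0x01 };

	writesb(port, init_seq, sizeof(init_seq));	/* addr is now const void * */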
From f10d14ddec8daf11a298f05ab3d644887df39830 Mon Sep 17 00:00:00 2001
From: Arnaud Giersch
Date: Sun, 13 Nov 2005 00:38:18 +0100
Subject: [MIPS] Fix documentation typos.

Signed-off-by: Arnaud Giersch
Signed-off-by: Ralf Baechle
---
 include/asm-mips/atomic.h | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 2c87b41e69b..55c37c106ef 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -231,11 +231,12 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 }
 
 /*
- * atomic_sub_if_positive - add integer to atomic variable
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
  */
 static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
@@ -577,11 +578,12 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 }
 
 /*
- * atomic64_sub_if_positive - add integer to atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
  * @v: pointer of type atomic64_t
  *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
 */
 static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
--
cgit v1.2.3
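The corrected kernel-doc now matches what the code actually does: the return
value is always the old value minus @i, and the store only happens when that
result is non-negative. A small illustrative example:

	atomic_t v = ATOMIC_INIT(5);

	atomic_sub_if_positive(3, &v);	/* returns 2, v becomes 2 */
	atomic_sub_if_positive(3, &v);	/* returns -1, v stays 2 */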
From c183f1224bbae052b5fbb971d6eafc5cbdc6be4f Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Tue, 15 Nov 2005 13:05:26 +0000
Subject: [MIPS] JMR3927: Fix include wrapper symbol.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/mach-jmr3927/ds1742.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/mach-jmr3927/ds1742.h b/include/asm-mips/mach-jmr3927/ds1742.h
index 134a4b6c334..cff6192d4bd 100644
--- a/include/asm-mips/mach-jmr3927/ds1742.h
+++ b/include/asm-mips/mach-jmr3927/ds1742.h
@@ -5,12 +5,12 @@
  *
  * Copyright (C) 2003 by Ralf Baechle
  */
-#ifndef __ASM_MACH_JMR3927_ASM_DS1742_H
-#define __ASM_MACH_JMR3927_ASM_DS1742_H
+#ifndef __ASM_MACH_JMR3927_DS1742_H
+#define __ASM_MACH_JMR3927_DS1742_H
 
 #include
 
 #define rtc_read(reg)		(jmr3927_nvram_in(addr))
 #define rtc_write(data, reg)	(jmr3927_nvram_out((data),(reg)))
 
-#endif /* __ASM_MACH_JMR3927_ASM_DS1742_H */
+#endif /* __ASM_MACH_JMR3927_DS1742_H */
--
cgit v1.2.3

From 561a0792405bea8ead78990d755dd1f95b8e95b8 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Tue, 15 Nov 2005 13:25:59 +0000
Subject: [MIPS] SEAD: Delete seadint_init() prototype.

There is no definition for seadint_init() and the unprotected
prototype breaks compilation of assembler files.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/mips-boards/seadint.h | 2 --
 1 file changed, 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h
index c3dcfcb928b..ba88f769f97 100644
--- a/include/asm-mips/mips-boards/seadint.h
+++ b/include/asm-mips/mips-boards/seadint.h
@@ -23,6 +23,4 @@
 #define SEADINT_UART0	2
 #define SEADINT_UART1	3
 
-extern void seadint_init(void);
-
 #endif /* !(_MIPS_SEADINT_H) */
--
cgit v1.2.3

From 1a6ea3ec6784cf3dedc338e1980dc0b4cf28a805 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Tue, 15 Nov 2005 16:10:01 +0000
Subject: [MIPS] SEAD: More build fixes.

Signed-off-by: Ralf Baechle
---
 include/asm-mips/mips-boards/seadint.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-mips/mips-boards/seadint.h b/include/asm-mips/mips-boards/seadint.h
index ba88f769f97..365c2a3c64f 100644
--- a/include/asm-mips/mips-boards/seadint.h
+++ b/include/asm-mips/mips-boards/seadint.h
@@ -20,7 +20,14 @@
 #ifndef _MIPS_SEADINT_H
 #define _MIPS_SEADINT_H
 
-#define SEADINT_UART0	2
-#define SEADINT_UART1	3
+/*
+ * Interrupts 0..7 are used for SEAD CPU interrupts
+ */
+#define MIPSCPU_INT_BASE	0
+
+#define MIPSCPU_INT_UART0	2
+#define MIPSCPU_INT_UART1	3
+
+#define MIPSCPU_INT_CPUCTR	7
 
 #endif /* !(_MIPS_SEADINT_H) */
--
cgit v1.2.3

From b7fd1edd2c0c225afa96af92d4adecb91e7d439d Mon Sep 17 00:00:00 2001
From: Constantine Gavrilov
Date: Thu, 17 Nov 2005 11:40:43 +0200
Subject: [PATCH] x86: fix sigaddset() inline asm memory constraint

Due to incomplete memory constraints, gcc would miscompile code with
sigaddset on i386 if the sig arg was const.

A quote from Jakub to make the issue clear:

"You need either
  __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig-1) : "cc");
or
  __asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig-1), "m"(*set) : "cc");
because the btsl instruction doesn't just set the memory to some value,
but needs to read its previous content as well.  If you don't tell that
fact to GCC, GCC is of course free to optimize as if the asm was just
setting the value and not depended on the previous value."

Signed-off-by: Linus Torvalds
---
 include/asm-i386/signal.h | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)
(limited to 'include')

diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index cbb47d34aa3..6ba29f145bf 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -159,14 +159,37 @@ typedef struct sigaltstack {
 
 #define __HAVE_ARCH_SIG_BITOPS
 
-static __inline__ void sigaddset(sigset_t *set, int _sig)
+#define sigaddset(set,sig) \
+	(__builtin_constant_p(sig) ? \
+	 __const_sigaddset((set),(sig)) : \
+	 __gen_sigaddset((set),(sig)))
+
+static __inline__ void __gen_sigaddset(sigset_t *set, int _sig)
 {
-	__asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+	__asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-static __inline__ void sigdelset(sigset_t *set, int _sig)
+static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
 {
-	__asm__("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+	unsigned long sig = _sig - 1;
+	set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
+}
+
+#define sigdelset(set,sig) \
+	(__builtin_constant_p(sig) ? \
+	 __const_sigdelset((set),(sig)) : \
+	 __gen_sigdelset((set),(sig)))
+
+
+static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
+{
+	__asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+}
+
+static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
+{
+	unsigned long sig = _sig - 1;
+	set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
+}
 
 static __inline__ int __const_sigismember(sigset_t *set, int _sig)
--
cgit v1.2.3
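Jakub's point can be shown with a standalone snippet (illustrative user-space
code, not part of the patch): btsl reads the old memory value, sets one bit,
and writes the word back, so the asm must declare the operand as read-write.

	unsigned long word = 0x1;

	/* Wrong: "=m" claims the asm only writes the memory, so gcc may
	 * treat the previous contents as dead and optimise them away. */
	__asm__("btsl %1,%0" : "=m"(word) : "Ir"(4) : "cc");

	/* Right: "+m" tells gcc the asm both reads and writes the memory. */
	__asm__("btsl %1,%0" : "+m"(word) : "Ir"(4) : "cc");

With "=m", gcc is free to drop or reorder the store that initialises the
sigset, which is how a const sig argument ended up miscompiled.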
From 67a1901ff498363e253b90ba132e336c925203ed Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 17 Nov 2005 16:48:00 +0000
Subject: [ARM] __ioremap doesn't use 4th argument

The "align" argument in ARMs __ioremap is unused and provides a
misleading expectation that it might do something.  It doesn't.
Remove it.

Signed-off-by: Russell King
---
 include/asm-arm/arch-ixp4xx/io.h |  9 +++------
 include/asm-arm/io.h             | 21 ++++++++++++---------
 2 files changed, 15 insertions(+), 15 deletions(-)
(limited to 'include')

diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
index 688f7f90d93..942b622455b 100644
--- a/include/asm-arm/arch-ixp4xx/io.h
+++ b/include/asm-arm/arch-ixp4xx/io.h
@@ -59,11 +59,10 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
 * fallback to the default.
 */
 static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags)
 {
-	extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
 	if((addr < 0x48000000) || (addr > 0x4fffffff))
-		return __ioremap(addr, size, flags, align);
+		return __ioremap(addr, size, flags);
 
 	return (void *)addr;
 }
@@ -71,13 +70,11 @@
 static inline void
 __ixp4xx_iounmap(void __iomem *addr)
 {
-	extern void __iounmap(void __iomem *addr);
-
 	if ((u32)addr >= VMALLOC_START)
 		__iounmap(addr);
 }
 
-#define __arch_ioremap(a, s, f, x)	__ixp4xx_ioremap(a, s, f, x)
+#define __arch_ioremap(a, s, f)		__ixp4xx_ioremap(a, s, f)
 #define __arch_iounmap(a)		__ixp4xx_iounmap(a)
 
 #define	writeb(v, p)			__ixp4xx_writeb(v, p)

diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index 2e6799632f1..ae69db4a101 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -54,6 +54,12 @@ extern void __raw_readsl(void __iomem *addr, void *data, int longlen);
 #define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
 #define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
 
+/*
+ * Architecture ioremap implementation.
+ */
+extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
+extern void __iounmap(void __iomem *addr);
+
 /*
 * Bad read/write accesses...
 */
@@ -256,18 +262,15 @@ out:
 * ioremap takes a PCI memory address, as specified in
 * Documentation/IO-mapping.txt.
 */
-extern void __iomem * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
-extern void __iounmap(void __iomem *addr);
-
 #ifndef __arch_ioremap
-#define ioremap(cookie,size)		__ioremap(cookie,size,0,1)
-#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0,1)
-#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__ioremap(cookie,size,0)
+#define ioremap_nocache(cookie,size)	__ioremap(cookie,size,0)
+#define ioremap_cached(cookie,size)	__ioremap(cookie,size,L_PTE_CACHEABLE)
 #define iounmap(cookie)			__iounmap(cookie)
 #else
-#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0,1)
-#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0,1)
-#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE,1)
+#define ioremap(cookie,size)		__arch_ioremap((cookie),(size),0)
+#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie),(size),0)
+#define ioremap_cached(cookie,size)	__arch_ioremap((cookie),(size),L_PTE_CACHEABLE)
 #define iounmap(cookie)			__arch_iounmap(cookie)
 #endif
--
cgit v1.2.3

From cd02e27b1514a27b2a8ab59755ae6d23d4d8a10f Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Thu, 17 Nov 2005 10:04:31 -0800
Subject: x86: Fix silly typo in recent fixes

The second __const_sigaddset() should have been a sigdelset..

Compile trouble noted by Greg K-H.

Signed-off-by: Linus Torvalds
---
 include/asm-i386/signal.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 6ba29f145bf..76524b4052a 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -186,7 +186,7 @@ static __inline__ void __gen_sigdelset(sigset_t *set, int _sig)
 	__asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
 }
 
-static __inline__ void __const_sigaddset(sigset_t *set, int _sig)
+static __inline__ void __const_sigdelset(sigset_t *set, int _sig)
 {
 	unsigned long sig = _sig - 1;
 	set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
--
cgit v1.2.3

From d911aed8adf74e1fae88d082b8474b2175b7f1da Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 17 Nov 2005 16:27:02 -0500
Subject: [PARISC] Fix our interrupts not to use smp_call_function

Fix our interrupts not to use smp_call_function

On K and D class smp, the generic code calls this under an irq
spinlock, which causes the WARN_ON() message in smp_call_function()
(and is also illegal because it could deadlock).

The fix is to use a new scheme based on the IPI_NOP.

Signed-off-by: James Bottomley
Signed-off-by: Kyle McMartin
---
 include/asm-parisc/smp.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'include')

diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 9413f67a540..a5191950ce0 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
 #define cpu_logical_map(cpu)	(cpu)
 
 extern void smp_send_reschedule(int cpu);
+extern void smp_send_all_nop(void);
 
 #endif /* !ASSEMBLY */
 
--
cgit v1.2.3
From 1d4c452a85503cdb4bca5925cf698b61d3aa43a0 Mon Sep 17 00:00:00 2001
From: Kyle McMartin
Date: Thu, 17 Nov 2005 16:27:44 -0500
Subject: [PARISC] Fix uniprocessor build by dummying smp_send_all_nop()

Since irq.c uses smp_send_all_nop, we must define it for UP builds as
well.  Make it a static inline so it gets optimized away.  This forces
irq.c to include <asm/smp.h> though.

Signed-off-by: Kyle McMartin
---
 include/asm-parisc/smp.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index a5191950ce0..dbdbd2e9fdf 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -54,7 +54,11 @@ extern unsigned long cpu_present_mask;
 
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+static inline void smp_send_all_nop(void) { return; }
+
+#endif
 
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 #define ANY_PROC_ID		0xFF		/* Any processor magic marker */
--
cgit v1.2.3

From c2ab64d09815cc4d48347ee3679658f197455a2a Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 17 Nov 2005 16:28:37 -0500
Subject: [PARISC] Add IRQ affinities

This really only adds them for the machines I can check SMP on, which
is CPU interrupts and IOSAPIC (so not any of the GSC based machines).

With this patch, irqbalanced can be used to maintain irq balancing.
Unfortunately, irqbalanced is a bit x86 centric, so it doesn't do an
incredibly good job, but it does work.

Signed-off-by: James Bottomley
Signed-off-by: Kyle McMartin
---
 include/asm-parisc/irq.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index f876bdf2205..b0a30e2c981 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -8,6 +8,7 @@
 #define _ASM_PARISC_IRQ_H
 
 #include
+#include <linux/cpumask.h>
 #include
 
 #define NO_IRQ		(-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
 extern int txn_claim_irq(int);
 extern unsigned int txn_alloc_data(unsigned int);
 extern unsigned long txn_alloc_addr(unsigned int);
+extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 
 extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
-
-extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
+extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
 
 /* soft power switch support (power.c) */
 extern struct tasklet_struct power_tasklet;
--
cgit v1.2.3
From 08dc2ca61e683e9119ff534dfcd0fd555401fcf7 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 17 Nov 2005 16:35:09 -0500
Subject: [PARISC] Fix our spinlock implementation

We actually have two separate bad bugs

1. The read_lock implementation spins with disabled interrupts.  This
   is completely wrong
2. Our spin_lock_irqsave should check to see if interrupts were enabled
   before the call and re-enable interrupts around the inner spin loop.

The problem is that if we spin with interrupts off, we can't receive
IPIs.  This has resulted in a bug where SMP machines suddenly spit
smp_call_function timeout messages and hang.

The scenario I've caught is

CPU0 does a flush_tlb_all holding the vmlist_lock for write.
CPU1 tries a cat of /proc/meminfo which tries to acquire vmlist_lock
  for read
CPU1 is now spinning with interrupts disabled
CPU0 tries to execute a smp_call_function to flush the local tlb caches

This is now a deadlock because CPU1 is spinning with interrupts
disabled and can never receive the IPI

Signed-off-by: James Bottomley
Signed-off-by: Kyle McMartin
---
 include/asm-parisc/spinlock.h | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
(limited to 'include')

diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 7c3f406a746..16c2ac075fc 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 	return *a == 0;
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
 #define __raw_spin_unlock_wait(x) \
 		do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void __raw_spin_lock(raw_spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+					 unsigned long flags)
 {
 	volatile unsigned int *a;
 
 	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
-		while (*a == 0);
+		while (*a == 0)
+			if (flags & PSW_SM_I) {
+				local_irq_enable();
+				cpu_relax();
+				local_irq_disable();
+			} else
+				cpu_relax();
 	mb();
 }
 
@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
 
 static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter++;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	unsigned long flags;
-	local_irq_save(flags);
 	__raw_spin_lock(&rw->lock);
 
 	rw->counter--;
 
 	__raw_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
 }
 
 /* write_lock is less trivial.  We optimistically grab the lock and check
--
cgit v1.2.3

From 29a622dd2b577d98731d325954f328b810826cfa Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Thu, 17 Nov 2005 16:44:14 -0500
Subject: [PARISC] Always spinlock tlb flush operations to ensure preempt safety

Since taking a spinlock disables preempt, and we need to spinlock tlb
flush on SMP for N class, we might as well just spinlock on
uniprocessor machines too.

Signed-off-by: Matthew Wilcox
Signed-off-by: Kyle McMartin
---
 include/asm-parisc/tlbflush.h | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)
(limited to 'include')

diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index e97aa8d1eff..c9ec39c6fc6 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -12,21 +12,15 @@
  * N class systems, only one PxTLB inter processor broadcast can be
  * active at any one time on the Merced bus.  This tlb purge
  * synchronisation is fairly lightweight and harmless so we activate
- * it on all SMP systems not just the N class. */
-#ifdef CONFIG_SMP
+ * it on all SMP systems not just the N class.  We also need to have
+ * preemption disabled on uniprocessor machines, and spin_lock does that
+ * nicely.
+ */
 extern spinlock_t pa_tlb_lock;
 
 #define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
 #define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
 
-#else
-
-#define purge_tlb_start(x) do { } while(0)
-#define purge_tlb_end(x) do { } while (0)
-
-#endif
-
-
 extern void flush_tlb_all(void);
 
 /*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
 		flush_tlb_all();
 	else {
-		preempt_disable();
 		mtsp(vma->vm_mm->context,1);
 		purge_tlb_start();
 		if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 				pdtlb(start);
 				start += PAGE_SIZE;
 			}
-			preempt_enable();
 		}
 		purge_tlb_end();
 	}
--
cgit v1.2.3

From d2a33170972c5772826f7f6cc950ab69ba034667 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Thu, 17 Nov 2005 20:34:35 +1100
Subject: [PATCH] powerpc: Fix typo in topology.h

The fix to topology.h (5cfccd7f132432dd4705444a44b51d12ef88a85f) seems
to have a typo: struct sched_domain has an idle_idx member but not an
idle_id member.  I assume this is the fix.

Signed-off-by: Michael Ellerman
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/topology.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'include')

diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 0e8c20c9606..db8095cbe09 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -42,7 +42,7 @@ static inline int node_to_first_cpu(int node)
 	.cache_nice_tries	= 1,				\
 	.per_cpu_gain		= 100,				\
 	.busy_idx		= 3,				\
-	.idle_id		= 1,				\
+	.idle_idx		= 1,				\
 	.newidle_idx		= 2,				\
 	.wake_idx		= 1,				\
 	.flags			= SD_LOAD_BALANCE		\
--
cgit v1.2.3

From 6defa38b3754c84cd3449447477aed81ea979407 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Fri, 18 Nov 2005 13:44:17 +1100
Subject: powerpc: Fix delay functions for 601 processors

My earlier merge of delay.h introduced a timebase-based udelay for
32-bit machines but also broke the 601, which doesn't have the
timebase register.  This fixes it by using the 601's RTC register on
the 601, and also moves __delay() and udelay() to be out-of-line in
arch/powerpc/kernel/time.c.  These functions aren't really performance
critical, after all.

Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/delay.h | 40 ++--------------------------------------
 1 file changed, 2 insertions(+), 38 deletions(-)
(limited to 'include')

diff --git a/include/asm-powerpc/delay.h b/include/asm-powerpc/delay.h
index 1492aa9ab71..54fe1f4f8fd 100644
--- a/include/asm-powerpc/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -13,43 +13,7 @@
  * Anton Blanchard.
 */
 
-extern unsigned long tb_ticks_per_usec;
-
-#ifdef CONFIG_PPC64
-/* define these here to prevent circular dependencies */
-/* these instructions control the thread priority on multi-threaded cpus */
-#define __HMT_low()	asm volatile("or 1,1,1")
-#define __HMT_medium()	asm volatile("or 2,2,2")
-#else
-#define __HMT_low()
-#define __HMT_medium()
-#endif
-
-#define __barrier() asm volatile("" ::: "memory")
-
-static inline unsigned long __get_tb(void)
-{
-	unsigned long rval;
-
-	asm volatile("mftb %0" : "=r" (rval));
-	return rval;
-}
-
-static inline void __delay(unsigned long loops)
-{
-	unsigned long start = __get_tb();
-
-	while((__get_tb() - start) < loops)
-		__HMT_low();
-	__HMT_medium();
-	__barrier();
-}
-
-static inline void udelay(unsigned long usecs)
-{
-	unsigned long loops = tb_ticks_per_usec * usecs;
-
-	__delay(loops);
-}
+extern void __delay(unsigned long loops);
+extern void udelay(unsigned long usecs);
 
 #endif /* _ASM_POWERPC_DELAY_H */
--
cgit v1.2.3
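The out-of-line replacements live in arch/powerpc/kernel/time.c, outside this
'include'-limited view. A sketch of the shape they plausibly take, assuming
helpers along the lines of __USE_RTC(), get_rtcl() and get_tbl() (details may
differ from the actual file):

	void __delay(unsigned long loops)
	{
		unsigned long start;
		int diff;

		if (__USE_RTC()) {
			/* 601: read the RTC lower register, which wraps at 1e9 */
			start = get_rtcl();
			do {
				diff = get_rtcl() - start;
				if (diff < 0)
					diff += 1000000000;
			} while (diff < loops);
		} else {
			/* everyone else: the timebase just counts up */
			start = get_tbl();
			while (get_tbl() - start < loops)
				;
		}
	}

	void udelay(unsigned long usecs)
	{
		__delay(tb_ticks_per_usec * usecs);
	}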
CPU_FTR_ALTIVEC_COMP | - CPU_FTR_MAYBE_CAN_NAP, + CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN, CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, - CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, - CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, - CPU_FTRS_E200 = CPU_FTR_USE_TB, - CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, + CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | + CPU_FTR_NODSISRALIGN, + CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | + CPU_FTR_NODSISRALIGN, + CPU_FTRS_E200 = CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN, + CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | + CPU_FTR_NODSISRALIGN, CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | - CPU_FTR_BIG_PHYS, - CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON, + CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN, + CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN, #ifdef __powerpc64__ CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR, -- cgit v1.2.3 From d2c5b69099ff747f9757da2416383b9a999171b1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Nov 2005 14:22:03 +0000 Subject: [ARM] Fix get_user when passed a const pointer Unfortunately, later gcc versions error out when our get_user is passed a const pointer, since we write to a temporary variable declared as typeof(*(p)), which propagates the const-ness. Signed-off-by: Russell King --- include/asm-arm/uaccess.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h index a2fdad0138b..064f0f5e8e2 100644 --- a/include/asm-arm/uaccess.h +++ b/include/asm-arm/uaccess.h @@ -100,7 +100,6 @@ static inline void set_fs (mm_segment_t fs) extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -extern int __get_user_8(void *); extern int __get_user_bad(void); #define __get_user_x(__r2,__p,__e,__s,__i...) \ @@ -114,7 +113,7 @@ extern int __get_user_bad(void); #define get_user(x,p) \ ({ \ const register typeof(*(p)) __user *__p asm("r0") = (p);\ - register typeof(*(p)) __r2 asm("r2"); \ + register unsigned int __r2 asm("r2"); \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ @@ -126,12 +125,9 @@ extern int __get_user_bad(void); case 4: \ __get_user_x(__r2, __p, __e, 4, "lr"); \ break; \ - case 8: \ - __get_user_x(__r2, __p, __e, 8, "lr"); \ - break; \ default: __e = __get_user_bad(); break; \ } \ - x = __r2; \ + x = (typeof(*(p))) __r2; \ __e; \ }) -- cgit v1.2.3 From 78baa2f8ad53968ff82ad9827b7793b3f46cba0e Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sat, 19 Nov 2005 00:33:56 +1100 Subject: ppc32: move some dma routines Every other architecture defines dma_cache_{inv,wback,wback_inv} in asm/io.h, and doing so brings us closer to ppc64. 
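For illustration only, here is roughly how a driver on a CONFIG_NOT_COHERENT_CACHE platform pairs these macros with a transfer. This is a hypothetical sketch, not code from the patch: the function names are made up, and buffer allocation and the actual device programming are elided.

/* Sketch: manual cache maintenance around device DMA on a
 * non-coherent ppc32 core. buf/len describe a driver-owned,
 * DMA-capable buffer. */
static void example_dma_to_device(void *buf, unsigned long len)
{
	/* write dirty lines back so the device reads current data */
	dma_cache_wback((unsigned long)buf, len);
	/* ... start the memory -> device transfer here ... */
}

static void example_dma_from_device(void *buf, unsigned long len)
{
	/* discard stale lines so the CPU rereads what the device wrote */
	dma_cache_inv((unsigned long)buf, len);
	/* ... the CPU may now parse buf ... */
}

On cache-coherent cores the same macros expand to do { } while (0), so common driver code needs no #ifdefs.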
Signed-off-by: Stephen Rothwell --- include/asm-ppc/dma-mapping.h | 10 ---------- include/asm-ppc/io.h | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-ppc/dma-mapping.h index 6e963511443..798602620e8 100644 --- a/include/asm-ppc/dma-mapping.h +++ b/include/asm-ppc/dma-mapping.h @@ -24,22 +24,12 @@ extern void __dma_free_coherent(size_t size, void *vaddr); extern void __dma_sync(void *vaddr, size_t size, int direction); extern void __dma_sync_page(struct page *page, unsigned long offset, size_t size, int direction); -#define dma_cache_inv(_start,_size) \ - invalidate_dcache_range(_start, (_start + _size)) -#define dma_cache_wback(_start,_size) \ - clean_dcache_range(_start, (_start + _size)) -#define dma_cache_wback_inv(_start,_size) \ - flush_dcache_range(_start, (_start + _size)) #else /* ! CONFIG_NOT_COHERENT_CACHE */ /* * Cache coherent cores. */ -#define dma_cache_inv(_start,_size) do { } while (0) -#define dma_cache_wback(_start,_size) do { } while (0) -#define dma_cache_wback_inv(_start,_size) do { } while (0) - #define __dma_alloc_coherent(gfp, size, handle) NULL #define __dma_free_coherent(size, addr) do { } while (0) #define __dma_sync(addr, size, rw) do { } while (0) diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 2bfdf9c9845..84ac6e258ee 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -545,6 +545,23 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *); #include #endif +#ifdef CONFIG_NOT_COHERENT_CACHE + +#define dma_cache_inv(_start,_size) \ + invalidate_dcache_range(_start, (_start + _size)) +#define dma_cache_wback(_start,_size) \ + clean_dcache_range(_start, (_start + _size)) +#define dma_cache_wback_inv(_start,_size) \ + flush_dcache_range(_start, (_start + _size)) + +#else + +#define dma_cache_inv(_start,_size) do { } while (0) +#define dma_cache_wback(_start,_size) do { } while (0) +#define dma_cache_wback_inv(_start,_size) do { } while (0) + +#endif + /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access -- cgit v1.2.3 From 78b09735a2f42f32c4611d92ea51755e1faae385 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sat, 19 Nov 2005 01:40:46 +1100 Subject: powerpc: merge dma-mapping.h Signed-off-by: Stephen Rothwell --- include/asm-powerpc/dma-mapping.h | 285 ++++++++++++++++++++++++++++++++++++++ include/asm-ppc/dma-mapping.h | 227 ------------------------------ include/asm-ppc64/dma-mapping.h | 136 ------------------ 3 files changed, 285 insertions(+), 363 deletions(-) create mode 100644 include/asm-powerpc/dma-mapping.h delete mode 100644 include/asm-ppc/dma-mapping.h delete mode 100644 include/asm-ppc64/dma-mapping.h (limited to 'include') diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h new file mode 100644 index 00000000000..59a80163f75 --- /dev/null +++ b/include/asm-powerpc/dma-mapping.h @@ -0,0 +1,285 @@ +/* + * Copyright (C) 2004 IBM + * + * Implements the generic device dma API for powerpc. + * the pci and vio busses + */ +#ifndef _ASM_DMA_MAPPING_H +#define _ASM_DMA_MAPPING_H + +#include +#include +#include +/* need struct page definitions */ +#include +#include +#include +#include + +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + +#ifdef CONFIG_NOT_COHERENT_CACHE +/* + * DMA-consistent mapping functions for PowerPCs that don't support + * cache snooping. These allocate/free a region of uncached mapped + * memory space for use with DMA devices. 
Alternatively, you could + * allocate the space "normally" and use the cache management functions + * to ensure it is consistent. + */ +extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp); +extern void __dma_free_coherent(size_t size, void *vaddr); +extern void __dma_sync(void *vaddr, size_t size, int direction); +extern void __dma_sync_page(struct page *page, unsigned long offset, + size_t size, int direction); + +#else /* ! CONFIG_NOT_COHERENT_CACHE */ +/* + * Cache coherent cores. + */ + +#define __dma_alloc_coherent(gfp, size, handle) NULL +#define __dma_free_coherent(size, addr) do { } while (0) +#define __dma_sync(addr, size, rw) do { } while (0) +#define __dma_sync_page(pg, off, sz, rw) do { } while (0) + +#endif /* ! CONFIG_NOT_COHERENT_CACHE */ + +#ifdef CONFIG_PPC64 + +extern int dma_supported(struct device *dev, u64 mask); +extern int dma_set_mask(struct device *dev, u64 dma_mask); +extern void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); +extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle); +extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, + size_t size, enum dma_data_direction direction); +extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction); +extern dma_addr_t dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction); +extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, enum dma_data_direction direction); +extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); +extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nhwentries, enum dma_data_direction direction); + +#else /* CONFIG_PPC64 */ + +#define dma_supported(dev, mask) (1) + +static inline int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + + *dev->dma_mask = dma_mask; + + return 0; +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t * dma_handle, + gfp_t gfp) +{ +#ifdef CONFIG_NOT_COHERENT_CACHE + return __dma_alloc_coherent(size, dma_handle, gfp); +#else + void *ret; + /* ignore region specifiers */ + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); + + if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) + gfp |= GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_bus(ret); + } + + return ret; +#endif +} + +static inline void +dma_free_coherent(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ +#ifdef CONFIG_NOT_COHERENT_CACHE + __dma_free_coherent(size, vaddr); +#else + free_pages((unsigned long)vaddr, get_order(size)); +#endif +} + +static inline dma_addr_t +dma_map_single(struct device *dev, void *ptr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + + __dma_sync(ptr, size, direction); + + return virt_to_bus(ptr); +} + +/* We do nothing. 
*/ +#define dma_unmap_single(dev, addr, size, dir) do { } while (0) + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + + __dma_sync_page(page, offset, size, direction); + + return page_to_bus(page) + offset; +} + +/* We do nothing. */ +#define dma_unmap_page(dev, handle, size, dir) do { } while (0) + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + BUG_ON(direction == DMA_NONE); + + for (i = 0; i < nents; i++, sg++) { + BUG_ON(!sg->page); + __dma_sync_page(sg->page, sg->offset, sg->length, direction); + sg->dma_address = page_to_bus(sg->page) + sg->offset; + } + + return nents; +} + +/* We don't do anything here. */ +#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0) + +#endif /* CONFIG_PPC64 */ + +static inline void dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + __dma_sync(bus_to_virt(dma_handle), size, direction); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + __dma_sync(bus_to_virt(dma_handle), size, direction); +} + +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + BUG_ON(direction == DMA_NONE); + + for (i = 0; i < nents; i++, sg++) + __dma_sync_page(sg->page, sg->offset, sg->length, direction); +} + +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + BUG_ON(direction == DMA_NONE); + + for (i = 0; i < nents; i++, sg++) + __dma_sync_page(sg->page, sg->offset, sg->length, direction); +} + +static inline int dma_mapping_error(dma_addr_t dma_addr) +{ +#ifdef CONFIG_PPC64 + return (dma_addr == DMA_ERROR_CODE); +#else + return 0; +#endif +} + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#ifdef CONFIG_NOT_COHERENT_CACHE +#define dma_is_consistent(d) (0) +#else +#define dma_is_consistent(d) (1) +#endif + +static inline int dma_get_cache_alignment(void) +{ +#ifdef CONFIG_PPC64 + /* no easy way to get cache size on all processors, so return + * the maximum possible, to be safe */ + return (1 << L1_CACHE_SHIFT_MAX); +#else + /* + * Each processor family will define its own L1_CACHE_SHIFT, + * L1_CACHE_BYTES wraps to this, so this is always safe. 
+ */ + return L1_CACHE_BYTES; +#endif +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + /* just sync everything for now */ + dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + /* just sync everything for now */ + dma_sync_single_for_device(dev, dma_handle, offset + size, direction); +} + +static inline void dma_cache_sync(void *vaddr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + __dma_sync(vaddr, size, (int)direction); +} + +/* + * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO + */ +struct dma_mapping_ops { + void * (*alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + void (*free_coherent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + dma_addr_t (*map_single)(struct device *dev, void *ptr, + size_t size, enum dma_data_direction direction); + void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction); + int (*map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction); + void (*unmap_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction); + int (*dma_supported)(struct device *dev, u64 mask); + int (*dac_dma_supported)(struct device *dev, u64 mask); +}; + +#endif /* _ASM_DMA_MAPPING_H */ diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-ppc/dma-mapping.h deleted file mode 100644 index 798602620e8..00000000000 --- a/include/asm-ppc/dma-mapping.h +++ /dev/null @@ -1,227 +0,0 @@ -/* - * This is based on both include/asm-sh/dma-mapping.h and - * include/asm-ppc/pci.h - */ -#ifndef __ASM_PPC_DMA_MAPPING_H -#define __ASM_PPC_DMA_MAPPING_H - -#include -/* need struct page definitions */ -#include -#include -#include - -#ifdef CONFIG_NOT_COHERENT_CACHE -/* - * DMA-consistent mapping functions for PowerPCs that don't support - * cache snooping. These allocate/free a region of uncached mapped - * memory space for use with DMA devices. Alternatively, you could - * allocate the space "normally" and use the cache management functions - * to ensure it is consistent. - */ -extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp); -extern void __dma_free_coherent(size_t size, void *vaddr); -extern void __dma_sync(void *vaddr, size_t size, int direction); -extern void __dma_sync_page(struct page *page, unsigned long offset, - size_t size, int direction); - -#else /* ! CONFIG_NOT_COHERENT_CACHE */ -/* - * Cache coherent cores. - */ - -#define __dma_alloc_coherent(gfp, size, handle) NULL -#define __dma_free_coherent(size, addr) do { } while (0) -#define __dma_sync(addr, size, rw) do { } while (0) -#define __dma_sync_page(pg, off, sz, rw) do { } while (0) - -#endif /* ! 
CONFIG_NOT_COHERENT_CACHE */ - -#define dma_supported(dev, mask) (1) - -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} - -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, - gfp_t gfp) -{ -#ifdef CONFIG_NOT_COHERENT_CACHE - return __dma_alloc_coherent(size, dma_handle, gfp); -#else - void *ret; - /* ignore region specifiers */ - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); - - if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) - gfp |= GFP_DMA; - - ret = (void *)__get_free_pages(gfp, get_order(size)); - - if (ret != NULL) { - memset(ret, 0, size); - *dma_handle = virt_to_bus(ret); - } - - return ret; -#endif -} - -static inline void -dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ -#ifdef CONFIG_NOT_COHERENT_CACHE - __dma_free_coherent(size, vaddr); -#else - free_pages((unsigned long)vaddr, get_order(size)); -#endif -} - -static inline dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - __dma_sync(ptr, size, direction); - - return virt_to_bus(ptr); -} - -/* We do nothing. */ -#define dma_unmap_single(dev, addr, size, dir) do { } while (0) - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - __dma_sync_page(page, offset, size, direction); - - return page_to_bus(page) + offset; -} - -/* We do nothing. */ -#define dma_unmap_page(dev, handle, size, dir) do { } while (0) - -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++) { - BUG_ON(!sg->page); - __dma_sync_page(sg->page, sg->offset, sg->length, direction); - sg->dma_address = page_to_bus(sg->page) + sg->offset; - } - - return nents; -} - -/* We don't do anything here. 
*/ -#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0) - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - __dma_sync(bus_to_virt(dma_handle), size, direction); -} - -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - - __dma_sync(bus_to_virt(dma_handle), size, direction); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++) - __dma_sync_page(sg->page, sg->offset, sg->length, direction); -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(direction == DMA_NONE); - - for (i = 0; i < nents; i++, sg++) - __dma_sync_page(sg->page, sg->offset, sg->length, direction); -} - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#ifdef CONFIG_NOT_COHERENT_CACHE -#define dma_is_consistent(d) (0) -#else -#define dma_is_consistent(d) (1) -#endif - -static inline int dma_get_cache_alignment(void) -{ - /* - * Each processor family will define its own L1_CACHE_SHIFT, - * L1_CACHE_BYTES wraps to this, so this is always safe. - */ - return L1_CACHE_BYTES; -} - -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - /* just sync everything for now */ - dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction); -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - /* just sync everything for now */ - dma_sync_single_for_device(dev, dma_handle, offset + size, direction); -} - -static inline void dma_cache_sync(void *vaddr, size_t size, - enum dma_data_direction direction) -{ - __dma_sync(vaddr, size, (int)direction); -} - -static inline int dma_mapping_error(dma_addr_t dma_addr) -{ - return 0; -} - -#endif /* __ASM_PPC_DMA_MAPPING_H */ diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h deleted file mode 100644 index fb68fa23bea..00000000000 --- a/include/asm-ppc64/dma-mapping.h +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright (C) 2004 IBM - * - * Implements the generic device dma API for ppc64. 
Handles - * the pci and vio busses - */ - -#ifndef _ASM_DMA_MAPPING_H -#define _ASM_DMA_MAPPING_H - -#include -#include -/* need struct page definitions */ -#include -#include -#include - -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - -extern int dma_supported(struct device *dev, u64 mask); -extern int dma_set_mask(struct device *dev, u64 dma_mask); -extern void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); -extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle); -extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction); -extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction); -extern dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction); -extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction); -extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction); -extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nhwentries, enum dma_data_direction direction); - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -static inline int dma_mapping_error(dma_addr_t dma_addr) -{ - return (dma_addr == DMA_ERROR_CODE); -} - -/* Now for the API extensions over the pci_ one */ - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d) (1) - -static inline int -dma_get_cache_alignment(void) -{ - /* no easy way to get cache size on all processors, so return - * the maximum possible, to be safe */ - return (1 << L1_CACHE_SHIFT_MAX); -} - -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -static inline void -dma_cache_sync(void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - /* nothing to do */ -} - -/* - * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. 
VIO - */ -struct dma_mapping_ops { - void * (*alloc_coherent)(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - void (*free_coherent)(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - dma_addr_t (*map_single)(struct device *dev, void *ptr, - size_t size, enum dma_data_direction direction); - void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction); - int (*map_sg)(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction); - void (*unmap_sg)(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction); - int (*dma_supported)(struct device *dev, u64 mask); - int (*dac_dma_supported)(struct device *dev, u64 mask); -}; - -#endif /* _ASM_DMA_MAPPING_H */ -- cgit v1.2.3 From 966cc04b4d9d3d7a49e744888628acc36ebec5d4 Mon Sep 17 00:00:00 2001 From: Vitaly Bordug Date: Fri, 18 Nov 2005 01:10:55 -0800 Subject: [PATCH] ppc32: add missing define for fs_enet Ethernet driver This adds the FCC_PSMR_RMII definition, which is used in fs_enet to enable RMII mode. Signed-off-by: Vitaly Bordug Cc: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ppc/cpm2.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h index 43d2ebbc774..b638b87cebe 100644 --- a/include/asm-ppc/cpm2.h +++ b/include/asm-ppc/cpm2.h @@ -1091,5 +1091,7 @@ typedef struct im_idma { #define CPM_IMMR_OFFSET 0x101a8 #endif +#define FCC_PSMR_RMII ((uint)0x00020000) /* Use RMII interface */ + #endif /* __CPM2__ */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From b50ce2324cecf4efc7babe31f4aa1a07f9157317 Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 18 Nov 2005 01:11:02 -0800 Subject: [PATCH] ppc64 needs HPAGE_SHIFT when huge pages disabled With the new powerpc architecture we don't seem to be able to disable huge pages anymore. mm/built-in.o(.toc1+0xae0): undefined reference to `HPAGE_SHIFT' make: *** [.tmp_vmlinux1] Error 1 We seem to need to define HPAGE_SHIFT to something when HUGETLB_PAGE isn't defined. This patch defines it to PAGE_SHIFT when we have no support. 
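As a standalone sanity check of the fallback (a sketch only; PAGE_SHIFT is hard-coded to the usual 4K value for the example, and the derived macros are copied from page_64.h):

#include <assert.h>

#define PAGE_SHIFT 12			/* assumed 4K pages for this sketch */
#define HPAGE_SHIFT PAGE_SHIFT		/* the !CONFIG_HUGETLB_PAGE fallback */
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

int main(void)
{
	/* huge pages degenerate to ordinary pages, so generic
	 * references to these constants still compile and link */
	assert(HPAGE_SIZE == 4096UL);
	assert(HPAGE_MASK == ~0xfffUL);
	assert(HUGETLB_PAGE_ORDER == 0);
	return 0;
}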
Signed-off-by: Andy Whitcroft Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-powerpc/page_64.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h index c16f106b537..1e6e7846824 100644 --- a/include/asm-powerpc/page_64.h +++ b/include/asm-powerpc/page_64.h @@ -86,7 +86,11 @@ static inline void copy_page(void *to, void *from) extern u64 ppc64_pft_size; /* Large pages size */ +#ifdef CONFIG_HUGETLB_PAGE extern unsigned int HPAGE_SHIFT; +#else +#define HPAGE_SHIFT PAGE_SHIFT +#endif #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -- cgit v1.2.3 From cea00da3972806d213a7dbe1b98e889435385e6b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 9 Nov 2005 11:30:51 -0800 Subject: [PATCH] git-netdev-all-ieee80211_get_payload-warning-fix include/net/ieee80211.h: In function `ieee80211_get_payload': include/net/ieee80211.h:1046: warning: control reaches end of non-void function Cc: Jeff Garzik Signed-off-by: Andrew Morton Signed-off-by: Jeff Garzik --- include/net/ieee80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index b93fd8c1d88..cde2f4f4f50 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -1042,7 +1042,7 @@ static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr) case IEEE80211_4ADDR_LEN: return ((struct ieee80211_hdr_4addr *)hdr)->payload; } - + return NULL; } static inline int ieee80211_is_ofdm_rate(u8 rate) -- cgit v1.2.3 From 7f0d50391adf371a0e66da0a1a44ba5cc6744ee8 Mon Sep 17 00:00:00 2001 From: mikem Date: Fri, 18 Nov 2005 22:00:17 +0100 Subject: [PATCH 1/3] cciss: bug fix for hpacucli This patch fixes a bug that breaks hpacucli, a command line interface for the HP Array Config Utility. Without this fix the utility will not detect any controllers in the system. I thought I had already fixed this, but I guess not. Thanks to all who reported the issue. Please consider this for inclusion. Signed-off-by: Mike Miller Signed-off-by: Jens Axboe --- include/linux/cciss_ioctl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h index 424d5e622b4..6e27f42e3a5 100644 --- a/include/linux/cciss_ioctl.h +++ b/include/linux/cciss_ioctl.h @@ -10,8 +10,8 @@ typedef struct _cciss_pci_info_struct { unsigned char bus; - unsigned short domain; unsigned char dev_fn; + unsigned short domain; __u32 board_id; } cciss_pci_info_struct; -- cgit v1.2.3 From 4ef3b8f4a5c056d5f784725974a530d1a7b4a811 Mon Sep 17 00:00:00 2001 From: Laurent Riffard Date: Fri, 18 Nov 2005 22:15:40 +0100 Subject: [PATCH] ide: remove ide_driver_t.owner field The structure ide_driver_t has a .owner field which is a duplicate of the .gendriver.owner field (.gen_driver is a struct device_driver). This patch removes ide_driver_t's owner field. Signed-off-by: Laurent Riffard Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/ide.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/ide.h b/include/linux/ide.h index ac8b25fa650..e99019057ba 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1089,9 +1089,11 @@ enum { /* * Subdrivers support. 
+ * + * The gendriver.owner field should be set to the module owner of this driver. + * The gendriver.name field should be set to the name of this driver */ typedef struct ide_driver_s { - struct module *owner; const char *version; u8 media; unsigned supports_dsc_overlap : 1; -- cgit v1.2.3 From 9ab8851549fb9ed570013c33e0786a3fd084be41 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 18 Nov 2005 16:16:42 -0500 Subject: [PARISC] Fix compile warning caused by conflicting types of expand_upwards() Fix compile warning caused by conflicting types of expand_upwards. IA64 requires it to not be static inline, as it's used outside mm/mmap.c. Signed-off-by: Matthew Wilcox Signed-off-by: Kyle McMartin --- include/linux/mm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1013a42d10b..0986d19be0b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr); /* Do stack extension */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); +#ifdef CONFIG_IA64 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); +#endif /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); -- cgit v1.2.3 From 800fc3eeb0eed3bf98d621c0da24d68cabcf6526 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 16 Nov 2005 15:43:48 +1100 Subject: [PATCH] powerpc: Remove imalloc.h asm-ppc64/imalloc.h is only included from files in arch/powerpc/mm. We already have a header for mm local definitions, arch/powerpc/mm/mmu_decl.h. Thus, this patch moves the contents of imalloc.h into mmu_decl.h. The only exceptions are the definitions of PHBS_IO_BASE, IMALLOC_BASE and IMALLOC_END. Those are moved into pgtable.h, next to similar definitions of VMALLOC_START and VMALLOC_SIZE. Built for multiplatform 32bit and 64bit (ARCH=powerpc). Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- include/asm-ppc64/imalloc.h | 26 -------------------------- include/asm-ppc64/pgtable.h | 7 +++++++ 2 files changed, 7 insertions(+), 26 deletions(-) delete mode 100644 include/asm-ppc64/imalloc.h (limited to 'include') diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h deleted file mode 100644 index 42adf7033a8..00000000000 --- a/include/asm-ppc64/imalloc.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef _PPC64_IMALLOC_H -#define _PPC64_IMALLOC_H - -/* - * Define the address range of the imalloc VM area. 
- */ -#define PHBS_IO_BASE VMALLOC_END -#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ -#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) - - -/* imalloc region types */ -#define IM_REGION_UNUSED 0x1 -#define IM_REGION_SUBSET 0x2 -#define IM_REGION_EXISTS 0x4 -#define IM_REGION_OVERLAP 0x8 -#define IM_REGION_SUPERSET 0x10 - -extern struct vm_struct * im_get_free_area(unsigned long size); -extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size, - int region_type); -extern void im_free(void *addr); - -extern unsigned long ioremap_bot; - -#endif /* _PPC64_IMALLOC_H */ diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h index a9783ba7fe9..dee36c83be1 100644 --- a/include/asm-ppc64/pgtable.h +++ b/include/asm-ppc64/pgtable.h @@ -46,6 +46,13 @@ struct mm_struct; #define VMALLOC_SIZE (0x80000000000UL) #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) +/* + * Define the address range of the imalloc VM area. + */ +#define PHBS_IO_BASE VMALLOC_END +#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ +#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) + /* * Common bits in a linux-style PTE. These match the bits in the * (hardware-defined) PowerPC PTE as closely as possible. Additional -- cgit v1.2.3 From 047ea7846565917c4a666635fa1fa4b5c587cd55 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 19 Nov 2005 20:17:32 +1100 Subject: powerpc: Trivially merge several headers from asm-ppc64 to asm-powerpc For these, I have just done the lame-o merge where the file ends up looking like: #ifndef CONFIG_PPC64 #include #else ... contents from asm-ppc64/foo.h #endif so nothing has changed, really, except that we reduce include/asm-ppc64 a bit more. Signed-off-by: Paul Mackerras --- include/asm-powerpc/io.h | 462 +++++++++++++++++++++++++++++++++ include/asm-powerpc/mmu.h | 399 +++++++++++++++++++++++++++++ include/asm-powerpc/mmu_context.h | 89 +++++++ include/asm-powerpc/mmzone.h | 50 ++++ include/asm-powerpc/pci-bridge.h | 153 +++++++++++ include/asm-powerpc/pgalloc.h | 156 ++++++++++++ include/asm-powerpc/pgtable-4k.h | 91 +++++++ include/asm-powerpc/pgtable-64k.h | 90 +++++++ include/asm-powerpc/pgtable.h | 524 ++++++++++++++++++++++++++++++++++++++ include/asm-ppc64/io.h | 458 --------------------------------- include/asm-ppc64/mmu.h | 395 ---------------------------- include/asm-ppc64/mmu_context.h | 85 ------- include/asm-ppc64/mmzone.h | 50 ---- include/asm-ppc64/pci-bridge.h | 151 ----------- include/asm-ppc64/pgalloc.h | 151 ----------- include/asm-ppc64/pgtable-4k.h | 91 ------- include/asm-ppc64/pgtable-64k.h | 90 ------- include/asm-ppc64/pgtable.h | 519 ------------------------------------- 18 files changed, 2014 insertions(+), 1990 deletions(-) create mode 100644 include/asm-powerpc/io.h create mode 100644 include/asm-powerpc/mmu.h create mode 100644 include/asm-powerpc/mmu_context.h create mode 100644 include/asm-powerpc/mmzone.h create mode 100644 include/asm-powerpc/pci-bridge.h create mode 100644 include/asm-powerpc/pgalloc.h create mode 100644 include/asm-powerpc/pgtable-4k.h create mode 100644 include/asm-powerpc/pgtable-64k.h create mode 100644 include/asm-powerpc/pgtable.h delete mode 100644 include/asm-ppc64/io.h delete mode 100644 include/asm-ppc64/mmu.h delete mode 100644 include/asm-ppc64/mmu_context.h delete mode 100644 include/asm-ppc64/mmzone.h delete mode 100644 include/asm-ppc64/pci-bridge.h delete mode 100644 include/asm-ppc64/pgalloc.h delete mode 100644 
include/asm-ppc64/pgtable-4k.h delete mode 100644 include/asm-ppc64/pgtable-64k.h delete mode 100644 include/asm-ppc64/pgtable.h (limited to 'include') diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h new file mode 100644 index 00000000000..48938d84d05 --- /dev/null +++ b/include/asm-powerpc/io.h @@ -0,0 +1,462 @@ +#ifndef _ASM_POWERPC_IO_H +#define _ASM_POWERPC_IO_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef CONFIG_PPC64 +#include +#else + +#include +#include +#include +#ifdef CONFIG_PPC_ISERIES +#include +#endif +#include +#include + +#include + +#define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 __iomem *)(p), (a), (c)) +#define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 __iomem *)(p), (a), (c)) +#define __ide_mm_outsw(p, a, c) _outsw_ns((volatile u16 __iomem *)(p), (a), (c)) +#define __ide_mm_outsl(p, a, c) _outsl_ns((volatile u32 __iomem *)(p), (a), (c)) + + +#define SIO_CONFIG_RA 0x398 +#define SIO_CONFIG_RD 0x399 + +#define SLOW_DOWN_IO + +extern unsigned long isa_io_base; +extern unsigned long pci_io_base; +extern unsigned long io_page_mask; + +#define MAX_ISA_PORT 0x10000 + +#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) \ + & io_page_mask) + +#ifdef CONFIG_PPC_ISERIES +/* __raw_* accessors aren't supported on iSeries */ +#define __raw_readb(addr) { BUG(); 0; } +#define __raw_readw(addr) { BUG(); 0; } +#define __raw_readl(addr) { BUG(); 0; } +#define __raw_readq(addr) { BUG(); 0; } +#define __raw_writeb(v, addr) { BUG(); 0; } +#define __raw_writew(v, addr) { BUG(); 0; } +#define __raw_writel(v, addr) { BUG(); 0; } +#define __raw_writeq(v, addr) { BUG(); 0; } +#define readb(addr) iSeries_Read_Byte(addr) +#define readw(addr) iSeries_Read_Word(addr) +#define readl(addr) iSeries_Read_Long(addr) +#define writeb(data, addr) iSeries_Write_Byte((data),(addr)) +#define writew(data, addr) iSeries_Write_Word((data),(addr)) +#define writel(data, addr) iSeries_Write_Long((data),(addr)) +#define memset_io(a,b,c) iSeries_memset_io((a),(b),(c)) +#define memcpy_fromio(a,b,c) iSeries_memcpy_fromio((a), (b), (c)) +#define memcpy_toio(a,b,c) iSeries_memcpy_toio((a), (b), (c)) + +#define inb(addr) readb(((void __iomem *)(long)(addr))) +#define inw(addr) readw(((void __iomem *)(long)(addr))) +#define inl(addr) readl(((void __iomem *)(long)(addr))) +#define outb(data,addr) writeb(data,((void __iomem *)(long)(addr))) +#define outw(data,addr) writew(data,((void __iomem *)(long)(addr))) +#define outl(data,addr) writel(data,((void __iomem *)(long)(addr))) +/* + * The *_ns versions below don't do byte-swapping. + * Neither do the standard versions now, these are just here + * for older code. 
+ */ +#define insw_ns(port, buf, ns) _insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) +#define insl_ns(port, buf, nl) _insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) +#else + +static inline unsigned char __raw_readb(const volatile void __iomem *addr) +{ + return *(volatile unsigned char __force *)addr; +} +static inline unsigned short __raw_readw(const volatile void __iomem *addr) +{ + return *(volatile unsigned short __force *)addr; +} +static inline unsigned int __raw_readl(const volatile void __iomem *addr) +{ + return *(volatile unsigned int __force *)addr; +} +static inline unsigned long __raw_readq(const volatile void __iomem *addr) +{ + return *(volatile unsigned long __force *)addr; +} +static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr) +{ + *(volatile unsigned char __force *)addr = v; +} +static inline void __raw_writew(unsigned short v, volatile void __iomem *addr) +{ + *(volatile unsigned short __force *)addr = v; +} +static inline void __raw_writel(unsigned int v, volatile void __iomem *addr) +{ + *(volatile unsigned int __force *)addr = v; +} +static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) +{ + *(volatile unsigned long __force *)addr = v; +} +#define readb(addr) eeh_readb(addr) +#define readw(addr) eeh_readw(addr) +#define readl(addr) eeh_readl(addr) +#define readq(addr) eeh_readq(addr) +#define writeb(data, addr) eeh_writeb((data), (addr)) +#define writew(data, addr) eeh_writew((data), (addr)) +#define writel(data, addr) eeh_writel((data), (addr)) +#define writeq(data, addr) eeh_writeq((data), (addr)) +#define memset_io(a,b,c) eeh_memset_io((a),(b),(c)) +#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(b),(c)) +#define memcpy_toio(a,b,c) eeh_memcpy_toio((a),(b),(c)) +#define inb(port) eeh_inb((unsigned long)port) +#define outb(val, port) eeh_outb(val, (unsigned long)port) +#define inw(port) eeh_inw((unsigned long)port) +#define outw(val, port) eeh_outw(val, (unsigned long)port) +#define inl(port) eeh_inl((unsigned long)port) +#define outl(val, port) eeh_outl(val, (unsigned long)port) + +/* + * The insw/outsw/insl/outsl macros don't do byte-swapping. + * They are only used in practice for transferring buffers which + * are arrays of bytes, and byte-swapping is not appropriate in + * that case. 
- paulus */ +#define insb(port, buf, ns) eeh_insb((port), (buf), (ns)) +#define insw(port, buf, ns) eeh_insw_ns((port), (buf), (ns)) +#define insl(port, buf, nl) eeh_insl_ns((port), (buf), (nl)) +#define insw_ns(port, buf, ns) eeh_insw_ns((port), (buf), (ns)) +#define insl_ns(port, buf, nl) eeh_insl_ns((port), (buf), (nl)) + +#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns)) +#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) +#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) + +#endif + +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) +#define readq_relaxed(addr) readq(addr) + +extern void _insb(volatile u8 __iomem *port, void *buf, int ns); +extern void _outsb(volatile u8 __iomem *port, const void *buf, int ns); +extern void _insw(volatile u16 __iomem *port, void *buf, int ns); +extern void _outsw(volatile u16 __iomem *port, const void *buf, int ns); +extern void _insl(volatile u32 __iomem *port, void *buf, int nl); +extern void _outsl(volatile u32 __iomem *port, const void *buf, int nl); +extern void _insw_ns(volatile u16 __iomem *port, void *buf, int ns); +extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns); +extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl); +extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl); + +#define mmiowb() + +/* + * output pause versions need a delay at least for the + * w83c105 ide controller in a p610. + */ +#define inb_p(port) inb(port) +#define outb_p(val, port) (udelay(1), outb((val), (port))) +#define inw_p(port) inw(port) +#define outw_p(val, port) (udelay(1), outw((val), (port))) +#define inl_p(port) inl(port) +#define outl_p(val, port) (udelay(1), outl((val), (port))) + +/* + * The *_ns versions below don't do byte-swapping. + * Neither do the standard versions now, these are just here + * for older code. + */ +#define outsw_ns(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) +#define outsl_ns(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) + + +#define IO_SPACE_LIMIT ~(0UL) + + +#ifdef __KERNEL__ +extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr, + unsigned long size, unsigned long flags); +extern void __iomem *__ioremap(unsigned long address, unsigned long size, + unsigned long flags); + +/** + * ioremap - map bus memory into CPU space + * @address: bus address of the memory + * @size: size of the resource to map + * + * ioremap performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + */ +extern void __iomem *ioremap(unsigned long address, unsigned long size); + +#define ioremap_nocache(addr, size) ioremap((addr), (size)) +extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size); +extern void iounmap(volatile void __iomem *addr); +extern void __iomem * reserve_phb_iospace(unsigned long size); + +/** + * virt_to_phys - map virtual addresses to physical + * @address: address to remap + * + * The returned physical address is the physical (CPU) mapping for + * the memory address given. It is only valid to use this function on + * addresses directly mapped or allocated via kmalloc. 
+ * + * This function does not give bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline unsigned long virt_to_phys(volatile void * address) +{ + return __pa((unsigned long)address); +} + +/** + * phys_to_virt - map physical address to virtual + * @address: address to remap + * + * The returned virtual address is a current CPU mapping for + * the memory address given. It is only valid to use this function on + * addresses that have a kernel mapping + * + * This function does not handle bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline void * phys_to_virt(unsigned long address) +{ + return (void *)__va(address); +} + +/* + * Change "struct page" to physical address. + */ +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +/* We do NOT want virtual merging, it would put too much pressure on + * our iommu allocator. Instead, we want drivers to be smart enough + * to coalesce sglists that happen to have been mapped in a contiguous + * way by the iommu + */ +#define BIO_VMERGE_BOUNDARY 0 + +#endif /* __KERNEL__ */ + +static inline void iosync(void) +{ + __asm__ __volatile__ ("sync" : : : "memory"); +} + +/* Enforce in-order execution of data I/O. + * No distinction between read/write on PPC; use eieio for all three. + */ +#define iobarrier_rw() eieio() +#define iobarrier_r() eieio() +#define iobarrier_w() eieio() + +/* + * 8, 16 and 32 bit, big and little endian I/O operations, with barrier. + * These routines do not perform EEH-related I/O address translation, + * and should not be used directly by device drivers. Use inb/readb + * instead. + */ +static inline int in_8(const volatile unsigned char __iomem *addr) +{ + int ret; + + __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr)); + return ret; +} + +static inline void out_8(volatile unsigned char __iomem *addr, int val) +{ + __asm__ __volatile__("stb%U0%X0 %1,%0; sync" + : "=m" (*addr) : "r" (val)); +} + +static inline int in_le16(const volatile unsigned short __iomem *addr) +{ + int ret; + + __asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync" + : "=r" (ret) : "r" (addr), "m" (*addr)); + return ret; +} + +static inline int in_be16(const volatile unsigned short __iomem *addr) +{ + int ret; + + __asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr)); + return ret; +} + +static inline void out_le16(volatile unsigned short __iomem *addr, int val) +{ + __asm__ __volatile__("sthbrx %1,0,%2; sync" + : "=m" (*addr) : "r" (val), "r" (addr)); +} + +static inline void out_be16(volatile unsigned short __iomem *addr, int val) +{ + __asm__ __volatile__("sth%U0%X0 %1,%0; sync" + : "=m" (*addr) : "r" (val)); +} + +static inline unsigned in_le32(const volatile unsigned __iomem *addr) +{ + unsigned ret; + + __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync" + : "=r" (ret) : "r" (addr), "m" (*addr)); + return ret; +} + +static inline unsigned in_be32(const volatile unsigned __iomem *addr) +{ + unsigned ret; + + __asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr)); + return ret; +} + +static inline void out_le32(volatile unsigned __iomem *addr, int val) +{ + __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr) + : "r" (val), "r" (addr)); +} + +static inline void out_be32(volatile unsigned __iomem *addr, int val) +{ + __asm__ __volatile__("stw%U0%X0 %1,%0; 
sync" + : "=m" (*addr) : "r" (val)); +} + +static inline unsigned long in_le64(const volatile unsigned long __iomem *addr) +{ + unsigned long tmp, ret; + + __asm__ __volatile__( + "ld %1,0(%2)\n" + "twi 0,%1,0\n" + "isync\n" + "rldimi %0,%1,5*8,1*8\n" + "rldimi %0,%1,3*8,2*8\n" + "rldimi %0,%1,1*8,3*8\n" + "rldimi %0,%1,7*8,4*8\n" + "rldicl %1,%1,32,0\n" + "rlwimi %0,%1,8,8,31\n" + "rlwimi %0,%1,24,16,23\n" + : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr)); + return ret; +} + +static inline unsigned long in_be64(const volatile unsigned long __iomem *addr) +{ + unsigned long ret; + + __asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync" + : "=r" (ret) : "m" (*addr)); + return ret; +} + +static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long val) +{ + unsigned long tmp; + + __asm__ __volatile__( + "rldimi %0,%1,5*8,1*8\n" + "rldimi %0,%1,3*8,2*8\n" + "rldimi %0,%1,1*8,3*8\n" + "rldimi %0,%1,7*8,4*8\n" + "rldicl %1,%1,32,0\n" + "rlwimi %0,%1,8,8,31\n" + "rlwimi %0,%1,24,16,23\n" + "std %0,0(%3)\n" + "sync" + : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr)); +} + +static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val) +{ + __asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val)); +} + +#ifndef CONFIG_PPC_ISERIES +#include +#endif + +#ifdef __KERNEL__ + +/** + * check_signature - find BIOS signatures + * @io_addr: mmio address to check + * @signature: signature block + * @length: length of signature + * + * Perform a signature comparison with the mmio address io_addr. This + * address should have been obtained by ioremap. + * Returns 1 on a match. + */ +static inline int check_signature(const volatile void __iomem * io_addr, + const unsigned char *signature, int length) +{ + int retval = 0; +#ifndef CONFIG_PPC_ISERIES + do { + if (readb(io_addr) != *signature) + goto out; + io_addr++; + signature++; + length--; + } while (length); + retval = 1; +out: +#endif + return retval; +} + +/* Nothing to do */ + +#define dma_cache_inv(_start,_size) do { } while (0) +#define dma_cache_wback(_start,_size) do { } while (0) +#define dma_cache_wback_inv(_start,_size) do { } while (0) + +/* Check of existence of legacy devices */ +extern int check_legacy_ioport(unsigned long base_port); + + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif /* __KERNEL__ */ + +#endif /* CONFIG_PPC64 */ +#endif /* _ASM_POWERPC_IO_H */ diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h new file mode 100644 index 00000000000..c1b4bbabbe9 --- /dev/null +++ b/include/asm-powerpc/mmu.h @@ -0,0 +1,399 @@ +#ifndef _ASM_POWERPC_MMU_H_ +#define _ASM_POWERPC_MMU_H_ + +#ifndef CONFIG_PPC64 +#include +#else + +/* + * PowerPC memory management structures + * + * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com> + * PPC64 rework. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include + +/* + * Segment table + */ + +#define STE_ESID_V 0x80 +#define STE_ESID_KS 0x20 +#define STE_ESID_KP 0x10 +#define STE_ESID_N 0x08 + +#define STE_VSID_SHIFT 12 + +/* Location of cpu0's segment table */ +#define STAB0_PAGE 0x6 +#define STAB0_PHYS_ADDR (STAB0_PAGE<<12) + +#ifndef __ASSEMBLY__ +extern char initial_stab[]; +#endif /* ! __ASSEMBLY */ + +/* + * SLB + */ + +#define SLB_NUM_BOLTED 3 +#define SLB_CACHE_ENTRIES 8 + +/* Bits in the SLB ESID word */ +#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */ + +/* Bits in the SLB VSID word */ +#define SLB_VSID_SHIFT 12 +#define SLB_VSID_B ASM_CONST(0xc000000000000000) +#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) +#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) +#define SLB_VSID_KS ASM_CONST(0x0000000000000800) +#define SLB_VSID_KP ASM_CONST(0x0000000000000400) +#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ +#define SLB_VSID_L ASM_CONST(0x0000000000000100) +#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ +#define SLB_VSID_LP ASM_CONST(0x0000000000000030) +#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000) +#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010) +#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020) +#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030) +#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP) + +#define SLB_VSID_KERNEL (SLB_VSID_KP) +#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) + +#define SLBIE_C (0x08000000) + +/* + * Hash table + */ + +#define HPTES_PER_GROUP 8 + +#define HPTE_V_AVPN_SHIFT 7 +#define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) +#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) +#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) +#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) +#define HPTE_V_LOCK ASM_CONST(0x0000000000000008) +#define HPTE_V_LARGE ASM_CONST(0x0000000000000004) +#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002) +#define HPTE_V_VALID ASM_CONST(0x0000000000000001) + +#define HPTE_R_PP0 ASM_CONST(0x8000000000000000) +#define HPTE_R_TS ASM_CONST(0x4000000000000000) +#define HPTE_R_RPN_SHIFT 12 +#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) +#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) +#define HPTE_R_PP ASM_CONST(0x0000000000000003) +#define HPTE_R_N ASM_CONST(0x0000000000000004) + +/* Values for PP (assumes Ks=0, Kp=1) */ +/* pp0 will always be 0 for linux */ +#define PP_RWXX 0 /* Supervisor read/write, User none */ +#define PP_RWRX 1 /* Supervisor read/write, User read */ +#define PP_RWRW 2 /* Supervisor read/write, User read/write */ +#define PP_RXRX 3 /* Supervisor read, User read */ + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long v; + unsigned long r; +} hpte_t; + +extern hpte_t *htab_address; +extern unsigned long htab_hash_mask; + +/* + * Page size definition + * + * shift : is the "PAGE_SHIFT" value for that page size + * sllp : is a bit mask with the value of SLB L || LP to be or'ed + * directly to a slbmte "vsid" value + * penc : is the HPTE encoding mask for the "LP" field: + * + */ +struct mmu_psize_def +{ + unsigned int shift; /* number of bits */ + unsigned int penc; /* HPTE encoding */ + unsigned int tlbiel; /* tlbiel supported for that page size */ + unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ + unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ +}; + +#endif /* __ASSEMBLY__ */ + +/* + * The kernel use the constants below to index in the page sizes array. 
+ * The use of fixed constants for this purpose is better for performances + * of the low level hash refill handlers. + * + * A non supported page size has a "shift" field set to 0 + * + * Any new page size being implemented can get a new entry in here. Whether + * the kernel will use it or not is a different matter though. The actual page + * size used by hugetlbfs is not defined here and may be made variable + */ + +#define MMU_PAGE_4K 0 /* 4K */ +#define MMU_PAGE_64K 1 /* 64K */ +#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */ +#define MMU_PAGE_1M 3 /* 1M */ +#define MMU_PAGE_16M 4 /* 16M */ +#define MMU_PAGE_16G 5 /* 16G */ +#define MMU_PAGE_COUNT 6 + +#ifndef __ASSEMBLY__ + +/* + * The current system page sizes + */ +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; +extern int mmu_linear_psize; +extern int mmu_virtual_psize; + +#ifdef CONFIG_HUGETLB_PAGE +/* + * The page size index of the huge pages for use by hugetlbfs + */ +extern int mmu_huge_psize; + +#endif /* CONFIG_HUGETLB_PAGE */ + +/* + * This function sets the AVPN and L fields of the HPTE appropriately + * for the page size + */ +static inline unsigned long hpte_encode_v(unsigned long va, int psize) +{ + unsigned long v = + v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); + v <<= HPTE_V_AVPN_SHIFT; + if (psize != MMU_PAGE_4K) + v |= HPTE_V_LARGE; + return v; +} + +/* + * This function sets the ARPN, and LP fields of the HPTE appropriately + * for the page size. We assume the pa is already "clean" that is properly + * aligned for the requested page size + */ +static inline unsigned long hpte_encode_r(unsigned long pa, int psize) +{ + unsigned long r; + + /* A 4K page needs no special encoding */ + if (psize == MMU_PAGE_4K) + return pa & HPTE_R_RPN; + else { + unsigned int penc = mmu_psize_defs[psize].penc; + unsigned int shift = mmu_psize_defs[psize].shift; + return (pa & ~((1ul << shift) - 1)) | (penc << 12); + } + return r; +} + +/* + * This hashes a virtual address for a 256Mb segment only for now + */ + +static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) +{ + return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); +} + +extern int __hash_page_4K(unsigned long ea, unsigned long access, + unsigned long vsid, pte_t *ptep, unsigned long trap, + unsigned int local); +extern int __hash_page_64K(unsigned long ea, unsigned long access, + unsigned long vsid, pte_t *ptep, unsigned long trap, + unsigned int local); +struct mm_struct; +extern int hash_huge_page(struct mm_struct *mm, unsigned long access, + unsigned long ea, unsigned long vsid, int local); + +extern void htab_finish_init(void); +extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, + unsigned long pstart, unsigned long mode, + int psize); + +extern void htab_initialize(void); +extern void htab_initialize_secondary(void); +extern void hpte_init_native(void); +extern void hpte_init_lpar(void); +extern void hpte_init_iSeries(void); +extern void mm_init_ppc64(void); + +extern long pSeries_lpar_hpte_insert(unsigned long hpte_group, + unsigned long va, unsigned long prpn, + unsigned long rflags, + unsigned long vflags, int psize); + +extern long native_hpte_insert(unsigned long hpte_group, + unsigned long va, unsigned long prpn, + unsigned long rflags, + unsigned long vflags, int psize); + +extern long iSeries_hpte_insert(unsigned long hpte_group, + unsigned long va, unsigned long prpn, + unsigned long rflags, + unsigned long vflags, int psize); + +extern void stabs_alloc(void); +extern void 
+extern void slb_initialize(void);
+extern void stab_initialize(unsigned long stab);
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * VSID allocation
+ *
+ * We first generate a 36-bit "proto-VSID".  For kernel addresses this
+ * is equal to the ESID, for user addresses it is:
+ *	(context << 15) | (esid & 0x7fff)
+ *
+ * The two forms are distinguishable because the top bit is 0 for user
+ * addresses, whereas the top two bits are 1 for kernel addresses.
+ * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
+ * now.
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ *	where	VSID_MULTIPLIER = 200730139, a 28-bit prime
+ *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
+ *
+ * This scramble is only well defined for proto-VSIDs below
+ * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
+ * reserved.  VSID_MULTIPLIER is prime, so in particular it is
+ * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+ * Because the modulus is 2^n-1 we can compute it efficiently without
+ * a divide or extra multiply (see below).
+ *
+ * This scheme has several advantages over older methods:
+ *
+ *	- We have VSIDs allocated for every kernel address
+ * (i.e. everything above 0xC000000000000000), except the very top
+ * segment, which simplifies several things.
+ *
+ *	- We allow for 15 significant bits of ESID and 20 bits of
+ * context for user addresses.  i.e. 8T (43 bits) of address space for
+ * up to 1M contexts (although the page table structure and context
+ * allocation will need changes to take advantage of this).
+ *
+ *	- The scramble function gives robust scattering in the hash
+ * table (at least based on some initial results).  The previous
+ * method was more susceptible to pathological cases giving excessive
+ * hash collisions.
+ */
+/*
+ * WARNING - If you change these you must make sure the asm
+ * implementations in slb_allocate (slb_low.S), do_stab_bolted
+ * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
+ *
+ * You'll also need to change the precomputed VSID values in head.S
+ * which are used by the iSeries firmware.
+ */
+
+#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
+#define VSID_BITS	36
+#define VSID_MODULUS	((1UL<<VSID_BITS)-1)
+
+#define CONTEXT_BITS	19
+#define USER_ESID_BITS	16
+
+#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
+
+/*
+ * This macro generates asm code to compute the VSID scramble
+ * function.  Used in slb_allocate() and do_stab_bolted.  The function
+ * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ *	rt = register containing the proto-VSID and into which the
+ *		VSID will be stored
+ *	rx = scratch register (clobbered)
+ *
+ *	- rt and rx must be different registers
+ *	- The answer will end up in the low 36 bits of rt.  The higher
+ *	  bits may contain other garbage, so you may need to mask the
+ *	  result.
+ */
+#define ASM_VSID_SCRAMBLE(rt, rx)	\
+	lis	rx,VSID_MULTIPLIER@h;					\
+	ori	rx,rx,VSID_MULTIPLIER@l;				\
+	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
+									\
+	srdi	rx,rt,VSID_BITS;					\
+	clrldi	rt,rt,(64-VSID_BITS);					\
+	add	rt,rt,rx;		/* add high and low bits */	\
+	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
+	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
+	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
+	 * the bit clear, r3 already has the answer we want, if it	\
+	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
+	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
+	addi	rx,rt,1;						\
+	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
+	add	rt,rt,rx
+
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+	mm_context_id_t id;
+#ifdef CONFIG_HUGETLB_PAGE
+	u16 low_htlb_areas, high_htlb_areas;
+#endif
+} mm_context_t;
+
+
+static inline unsigned long vsid_scramble(unsigned long protovsid)
+{
+#if 0
+	/* The code below is equivalent to this function for arguments
+	 * < 2^VSID_BITS, which is all this should ever be called
+	 * with.  However gcc is not clever enough to compute the
+	 * modulus (2^n-1) without a second multiply.
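+	 * (The identity used instead: for m = 2^n - 1, x mod m is
+	 * congruent to (x >> n) + (x & m), plus one final fold of the
+	 * carry bit, which is what the #else branch below computes.)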
*/ + return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); +#else /* 1 */ + unsigned long x; + + x = protovsid * VSID_MULTIPLIER; + x = (x >> VSID_BITS) + (x & VSID_MODULUS); + return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; +#endif /* 1 */ +} + +/* This is only valid for addresses >= KERNELBASE */ +static inline unsigned long get_kernel_vsid(unsigned long ea) +{ + return vsid_scramble(ea >> SID_SHIFT); +} + +/* This is only valid for user addresses (which are below 2^41) */ +static inline unsigned long get_vsid(unsigned long context, unsigned long ea) +{ + return vsid_scramble((context << USER_ESID_BITS) + | (ea >> SID_SHIFT)); +} + +#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) +#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) + +#endif /* __ASSEMBLY */ + +#endif /* CONFIG_PPC64 */ +#endif /* _ASM_POWERPC_MMU_H_ */ diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h new file mode 100644 index 00000000000..ea6798c7d5f --- /dev/null +++ b/include/asm-powerpc/mmu_context.h @@ -0,0 +1,89 @@ +#ifndef __ASM_POWERPC_MMU_CONTEXT_H +#define __ASM_POWERPC_MMU_CONTEXT_H + +#ifndef CONFIG_PPC64 +#include +#else + +#include +#include +#include +#include + +/* + * Copyright (C) 2001 PPC 64 Team, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * Getting into a kernel thread, there is no valid user segment, mark + * paca->pgdir NULL so that SLB miss on user addresses will fault + */ +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +#ifdef CONFIG_PPC_64K_PAGES + get_paca()->pgdir = NULL; +#endif /* CONFIG_PPC_64K_PAGES */ +} + +#define NO_CONTEXT 0 +#define MAX_CONTEXT (0x100000-1) + +extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); +extern void destroy_context(struct mm_struct *mm); + +extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm); +extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); + +/* + * switch_mm is the entry point called from the architecture independent + * code in kernel/sched.c + */ +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask)) + cpu_set(smp_processor_id(), next->cpu_vm_mask); + + /* No need to flush userspace segments if the mm doesnt change */ +#ifdef CONFIG_PPC_64K_PAGES + if (prev == next && get_paca()->pgdir == next->pgd) + return; +#else + if (prev == next) + return; +#endif /* CONFIG_PPC_64K_PAGES */ + +#ifdef CONFIG_ALTIVEC + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + asm volatile ("dssall"); +#endif /* CONFIG_ALTIVEC */ + + if (cpu_has_feature(CPU_FTR_SLB)) + switch_slb(tsk, next); + else + switch_stab(tsk, next); +} + +#define deactivate_mm(tsk,mm) do { } while (0) + +/* + * After we have set current->mm to a new value, this activates + * the context for the new mm so we see the new mappings. 
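+ * (exec is the canonical caller: exec_mmap() installs the new mm and
+ * then activates it here.)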
+ */ +static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm(prev, next, current); + local_irq_restore(flags); +} + +#endif /* CONFIG_PPC64 */ +#endif /* __ASM_POWERPC_MMU_CONTEXT_H */ diff --git a/include/asm-powerpc/mmzone.h b/include/asm-powerpc/mmzone.h new file mode 100644 index 00000000000..54958d6cae0 --- /dev/null +++ b/include/asm-powerpc/mmzone.h @@ -0,0 +1,50 @@ +/* + * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 + * + * PowerPC64 port: + * Copyright (C) 2002 Anton Blanchard, IBM Corp. + */ +#ifndef _ASM_MMZONE_H_ +#define _ASM_MMZONE_H_ + +#include + +/* + * generic non-linear memory support: + * + * 1) we will not split memory into more chunks than will fit into the + * flags field of the struct page + */ + +#ifdef CONFIG_NEED_MULTIPLE_NODES + +extern struct pglist_data *node_data[]; +/* + * Return a pointer to the node data for node n. + */ +#define NODE_DATA(nid) (node_data[nid]) + +/* + * Following are specific to this numa platform. + */ + +extern int numa_cpu_lookup_table[]; +extern cpumask_t numa_cpumask_lookup_table[]; +#ifdef CONFIG_MEMORY_HOTPLUG +extern unsigned long max_pfn; +#endif + +/* + * Following are macros that each numa implmentation must define. + */ + +#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) +#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) + +#endif /* CONFIG_NEED_MULTIPLE_NODES */ + +#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID +extern int __init early_pfn_to_nid(unsigned long pfn); +#endif + +#endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h new file mode 100644 index 00000000000..223ec7bd81d --- /dev/null +++ b/include/asm-powerpc/pci-bridge.h @@ -0,0 +1,153 @@ +#ifndef _ASM_POWERPC_PCI_BRIDGE_H +#define _ASM_POWERPC_PCI_BRIDGE_H + +#ifndef CONFIG_PPC64 +#include +#else + +#include +#include + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * Structure of a PCI controller (host bridge) + */ +struct pci_controller { + struct pci_bus *bus; + char is_dynamic; + void *arch_data; + struct list_head list_node; + + int first_busno; + int last_busno; + + void __iomem *io_base_virt; + unsigned long io_base_phys; + + /* Some machines have a non 1:1 mapping of + * the PCI memory space in the CPU bus space + */ + unsigned long pci_mem_offset; + unsigned long pci_io_size; + + struct pci_ops *ops; + volatile unsigned int __iomem *cfg_addr; + volatile void __iomem *cfg_data; + + /* Currently, we limit ourselves to 1 IO range and 3 mem + * ranges since the common pci_bus structure can't handle more + */ + struct resource io_resource; + struct resource mem_resources[3]; + int global_number; + int local_number; + unsigned long buid; + unsigned long dma_window_base_cur; + unsigned long dma_window_size; +}; + +/* + * PCI stuff, for nodes representing PCI devices, pointed to + * by device_node->data. 
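+ * (That is, pci_dev->sysdata points at the device_node and dn->data
+ * at the pci_dn; the PCI_DN() accessor below hides the cast.)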
+ */ +struct pci_controller; +struct iommu_table; + +struct pci_dn { + int busno; /* for pci devices */ + int bussubno; /* for pci devices */ + int devfn; /* for pci devices */ + +#ifdef CONFIG_PPC_PSERIES + int eeh_mode; /* See eeh.h for possible EEH_MODEs */ + int eeh_config_addr; + int eeh_check_count; /* # times driver ignored error */ + int eeh_freeze_count; /* # times this device froze up. */ + int eeh_is_bridge; /* device is pci-to-pci bridge */ +#endif + int pci_ext_config_space; /* for pci devices */ + struct pci_controller *phb; /* for pci devices */ + struct iommu_table *iommu_table; /* for phb's or bridges */ + struct pci_dev *pcidev; /* back-pointer to the pci device */ + struct device_node *node; /* back-pointer to the device_node */ +#ifdef CONFIG_PPC_ISERIES + struct list_head Device_List; + int Irq; /* Assigned IRQ */ + int Flags; /* Possible flags(disable/bist)*/ + u8 LogicalSlot; /* Hv Slot Index for Tces */ +#endif + u32 config_space[16]; /* saved PCI config space */ +}; + +/* Get the pointer to a device_node's pci_dn */ +#define PCI_DN(dn) ((struct pci_dn *) (dn)->data) + +struct device_node *fetch_dev_dn(struct pci_dev *dev); + +/* Get a device_node from a pci_dev. This code must be fast except + * in the case where the sysdata is incorrect and needs to be fixed + * up (this will only happen once). + * In this case the sysdata will have been inherited from a PCI host + * bridge or a PCI-PCI bridge further up the tree, so it will point + * to a valid struct pci_dn, just not the one we want. + */ +static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) +{ + struct device_node *dn = dev->sysdata; + struct pci_dn *pdn = dn->data; + + if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number) + return dn; /* fast path. 
sysdata is good */ + return fetch_dev_dn(dev); +} + +static inline int pci_device_from_OF_node(struct device_node *np, + u8 *bus, u8 *devfn) +{ + if (!PCI_DN(np)) + return -ENODEV; + *bus = PCI_DN(np)->busno; + *devfn = PCI_DN(np)->devfn; + return 0; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + if (bus->self) + return pci_device_to_OF_node(bus->self); + else + return bus->sysdata; /* Must be root bus (PHB) */ +} + +extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, + struct device_node *dev, int primary); + +extern int pcibios_remove_root_bus(struct pci_controller *phb); + +extern void phbs_remap_io(void); + +static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) +{ + struct device_node *busdn = bus->sysdata; + + BUG_ON(busdn == NULL); + return PCI_DN(busdn)->phb; +} + +extern struct pci_controller * +pcibios_alloc_controller(struct device_node *dev); +extern void pcibios_free_controller(struct pci_controller *phb); + +/* Return values for ppc_md.pci_probe_mode function */ +#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */ +#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */ +#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */ + +#endif /* CONFIG_PPC64 */ +#endif diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h new file mode 100644 index 00000000000..bfc2113b363 --- /dev/null +++ b/include/asm-powerpc/pgalloc.h @@ -0,0 +1,156 @@ +#ifndef _ASM_POWERPC_PGALLOC_H +#define _ASM_POWERPC_PGALLOC_H + +#ifndef CONFIG_PPC64 +#include +#else + +#include +#include +#include +#include + +extern kmem_cache_t *pgtable_cache[]; + +#ifdef CONFIG_PPC_64K_PAGES +#define PTE_CACHE_NUM 0 +#define PMD_CACHE_NUM 1 +#define PGD_CACHE_NUM 2 +#else +#define PTE_CACHE_NUM 0 +#define PMD_CACHE_NUM 1 +#define PUD_CACHE_NUM 1 +#define PGD_CACHE_NUM 0 +#endif + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL); +} + +static inline void pgd_free(pgd_t *pgd) +{ + kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd); +} + +#ifndef CONFIG_PPC_64K_PAGES + +#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM], + GFP_KERNEL|__GFP_REPEAT); +} + +static inline void pud_free(pud_t *pud) +{ + kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_set(pud, (unsigned long)pmd); +} + +#define pmd_populate(mm, pmd, pte_page) \ + pmd_populate_kernel(mm, pmd, page_address(pte_page)) +#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) + + +#else /* CONFIG_PPC_64K_PAGES */ + +#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + pmd_set(pmd, (unsigned long)pte); +} + +#define pmd_populate(mm, pmd, pte_page) \ + pmd_populate_kernel(mm, pmd, page_address(pte_page)) + +#endif /* CONFIG_PPC_64K_PAGES */ + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM], + GFP_KERNEL|__GFP_REPEAT); +} + +static inline void pmd_free(pmd_t *pmd) +{ + kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd); +} + +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long address) +{ + return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], + GFP_KERNEL|__GFP_REPEAT); +} + +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long address) +{ + return virt_to_page(pte_alloc_one_kernel(mm, address)); +} + +static inline void pte_free_kernel(pte_t *pte) +{ + kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte); +} + +static inline void pte_free(struct page *ptepage) +{ + pte_free_kernel(page_address(ptepage)); +} + +#define PGF_CACHENUM_MASK 0xf + +typedef struct pgtable_free { + unsigned long val; +} pgtable_free_t; + +static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum, + unsigned long mask) +{ + BUG_ON(cachenum > PGF_CACHENUM_MASK); + + return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum}; +} + +static inline void pgtable_free(pgtable_free_t pgf) +{ + void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK); + int cachenum = pgf.val & PGF_CACHENUM_MASK; + + kmem_cache_free(pgtable_cache[cachenum], p); +} + +extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); + +#define __pte_free_tlb(tlb, ptepage) \ + pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ + PTE_CACHE_NUM, PTE_TABLE_SIZE-1)) +#define __pmd_free_tlb(tlb, pmd) \ + pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ + PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) +#ifndef CONFIG_PPC_64K_PAGES +#define __pud_free_tlb(tlb, pmd) \ + pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ + PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) +#endif /* CONFIG_PPC_64K_PAGES */ + +#define check_pgt_cache() do { } while (0) + +#endif /* CONFIG_PPC64 */ +#endif /* _ASM_POWERPC_PGALLOC_H */ diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h new file mode 100644 index 00000000000..e9590c06ad9 --- /dev/null +++ b/include/asm-powerpc/pgtable-4k.h @@ -0,0 +1,91 @@ +/* + * Entries per page directory level. 
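+ * (The four levels resolve 9 + 7 + 7 + 9 = 32 index bits on top of
+ * the 12-bit page offset, i.e. a 44-bit effective address range.)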
The PTE level must use a 64b record + * for each page table entry. The PMD and PGD level use a 32b record for + * each entry by assuming that each entry is page aligned. + */ +#define PTE_INDEX_SIZE 9 +#define PMD_INDEX_SIZE 7 +#define PUD_INDEX_SIZE 7 +#define PGD_INDEX_SIZE 9 + +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) +#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* PMD_SHIFT determines what a second-level page table entry can map */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* With 4k base page size, hugepage PTEs go at the PMD level */ +#define MIN_HUGEPTE_SHIFT PMD_SHIFT + +/* PUD_SHIFT determines what a third-level page table entry can map */ +#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* PGDIR_SHIFT determines what a fourth-level page table entry can map */ +#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* PTE bits */ +#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ +#define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */ +#define _PAGE_F_SECOND _PAGE_SECONDARY +#define _PAGE_F_GIX _PAGE_GROUP_IX + +/* PTE flags to conserve for HPTE identification */ +#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ + _PAGE_SECONDARY | _PAGE_GROUP_IX) + +/* PAGE_MASK gives the right answer below, but only by accident */ +/* It should be preserving the high 48 bits and then specifically */ +/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */ +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \ + _PAGE_HPTEFLAGS) + +/* Bits to mask out from a PMD to get to the PTE page */ +#define PMD_MASKED_BITS 0 +/* Bits to mask out from a PUD to get to the PMD page */ +#define PUD_MASKED_BITS 0 +/* Bits to mask out from a PGD to get to the PUD page */ +#define PGD_MASKED_BITS 0 + +/* shift to put page number into pte */ +#define PTE_RPN_SHIFT (17) + +#define __real_pte(e,p) ((real_pte_t)(e)) +#define __rpte_to_pte(r) (r) +#define __rpte_to_hidx(r,index) (pte_val((r)) >> 12) + +#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ + do { \ + index = 0; \ + shift = mmu_psize_defs[psize].shift; \ + +#define pte_iterate_hashed_end() } while(0) + +/* + * 4-level page tables related bits + */ + +#define pgd_none(pgd) (!pgd_val(pgd)) +#define pgd_bad(pgd) (pgd_val(pgd) == 0) +#define pgd_present(pgd) (pgd_val(pgd) != 0) +#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) +#define pgd_page(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) + +#define pud_offset(pgdp, addr) \ + (((pud_t *) pgd_page(*(pgdp))) + \ + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) + +#define pud_ERROR(e) \ + printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pud_val(e)) diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h new file mode 100644 index 00000000000..154f1840ece --- /dev/null +++ b/include/asm-powerpc/pgtable-64k.h @@ -0,0 +1,90 @@ +#include + + +#define PTE_INDEX_SIZE 12 +#define PMD_INDEX_SIZE 12 +#define PUD_INDEX_SIZE 0 +#define PGD_INDEX_SIZE 4 + +#define PTE_TABLE_SIZE 
(sizeof(real_pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
+/* With 64k base page size, hugepage PTEs go at the PTE level */
+#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/* Additional PTE bits (don't change without checking asm in hash_low.S) */
+#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
+#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
+#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
+#define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
+#define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\
+			 _PAGE_COMBO)
+
+/* Shift to put page number into pte.
+ *
+ * That gives us a max RPN of 32 bits, which means a max of 48 bits
+ * of addressable physical space.
+ * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
+ * 32 makes PTEs more readable for debugging for now :)
+ */
+#define PTE_RPN_SHIFT	(32)
+#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
+
+/* _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS		0x1ff
+/* Bits to mask out from a PGD/PUD to get to the PMD page */
+#define PUD_MASKED_BITS		0x1ff
+
+#ifndef __ASSEMBLY__
+
+/* Manipulate "rpte" values */
+#define __real_pte(e,p)	((real_pte_t) { \
+	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
+	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __rpte_to_pte(r)	((r).pte)
+#define __rpte_sub_valid(rpte, index) \
+	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
+
+
+/* Trick: we set __end to va + 64k, which happens to work for
+ * a 16M page as well, since we want only one iteration
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	    \
+	do {								    \
+		unsigned long __end = va + PAGE_SIZE;			    \
+		unsigned __split = (psize == MMU_PAGE_4K ||		    \
+				    psize == MMU_PAGE_64K_AP);		    \
+		shift = mmu_psize_defs[psize].shift;			    \
+		for (index = 0; va < __end; index++, va += (1 << shift)) { \
+			if (!__split || __rpte_sub_valid(rpte, index)) do { \
+
+#define pte_iterate_hashed_end() } while(0); } } while(0)
+
+
+#endif /* __ASSEMBLY__ */
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
new file mode 100644
index 00000000000..0303f57366c
--- /dev/null
+++ b/include/asm-powerpc/pgtable.h
@@ -0,0 +1,524 @@
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+
+#ifndef CONFIG_PPC64
+#include <asm-ppc/pgtable.h>
+#else
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 hashed page table.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <asm/processor.h>		/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+struct mm_struct;
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-64k.h>
+#else
+#include <asm/pgtable-4k.h>
+#endif
+
+#define FIRST_USER_ADDRESS	0
+
+/*
+ * Size of EA range mapped by our pagetables.
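+ * (44 bits, i.e. 16T, with either the 4K or the 64K index layout.)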
+ */ +#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) +#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE) + +#if TASK_SIZE_USER64 > PGTABLE_RANGE +#error TASK_SIZE_USER64 exceeds pagetable range +#endif + +#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) +#error TASK_SIZE_USER64 exceeds user VSID range +#endif + +/* + * Define the address range of the vmalloc VM area. + */ +#define VMALLOC_START (0xD000000000000000ul) +#define VMALLOC_SIZE (0x80000000000UL) +#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) + +/* + * Define the address range of the imalloc VM area. + */ +#define PHBS_IO_BASE VMALLOC_END +#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ +#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) + +/* + * Common bits in a linux-style PTE. These match the bits in the + * (hardware-defined) PowerPC PTE as closely as possible. Additional + * bits may be defined in pgtable-*.h + */ +#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ +#define _PAGE_USER 0x0002 /* matches one of the PP bits */ +#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ +#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ +#define _PAGE_GUARDED 0x0008 +#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ +#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ +#define _PAGE_DIRTY 0x0080 /* C: page changed */ +#define _PAGE_ACCESSED 0x0100 /* R: page referenced */ +#define _PAGE_RW 0x0200 /* software: user write access allowed */ +#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ +#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ + +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) + +#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) + +/* __pgprot defined in asm-powerpc/page.h */ +#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) + +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE) +#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ + _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC) + +#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) +#define HAVE_PAGE_AGP + +/* PTEIDX nibble */ +#define _PTEIDX_SECONDARY 0x8 +#define _PTEIDX_GROUP_IX 0x7 + + +/* + * POWER4 and newer have per page execute protection, older chips can only + * do this on a segment (256MB) basis. + * + * Also, write permissions imply read permissions. + * This is the closest we can get.. 
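+ * (so a write-only request silently becomes read/write: __P010 and
+ * __S010 below map to PAGE_COPY and PAGE_SHARED respectively)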
+ * + * Note due to the way vm flags are laid out, the bits are XWR + */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_COPY +#define __P011 PAGE_COPY +#define __P100 PAGE_READONLY_X +#define __P101 PAGE_READONLY_X +#define __P110 PAGE_COPY_X +#define __P111 PAGE_COPY_X + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED +#define __S011 PAGE_SHARED +#define __S100 PAGE_READONLY_X +#define __S101 PAGE_READONLY_X +#define __S110 PAGE_SHARED_X +#define __S111 PAGE_SHARED_X + +#ifndef __ASSEMBLY__ + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_HUGETLB_PAGE + +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + +#endif + +#ifndef __ASSEMBLY__ + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * mk_pte takes a (struct page *) as input + */ +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + pte_t pte; + + + pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot); + return pte; +} + +#define pte_modify(_pte, newprot) \ + (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))) + +#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0) +#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) + +/* pte_clear moved to later in this file */ + +#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) (pmd_val(pmd) == 0) +#define pmd_present(pmd) (pmd_val(pmd) != 0) +#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) +#define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) +#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) + +#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) +#define pud_none(pud) (!pud_val(pud)) +#define pud_bad(pud) ((pud_val(pud)) == 0) +#define pud_present(pud) (pud_val(pud) != 0) +#define pud_clear(pudp) (pud_val(*(pudp)) = 0) +#define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS) + +#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) + +/* + * Find an entry in a page-table-directory. We combine the address region + * (the high order N bits) and the pgd portion of the address. 
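+ * (With the 4K layout an address therefore decomposes as pgd[43:35],
+ * pud[34:28], pmd[27:21], pte[20:12] and a 12-bit byte offset.)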
+ */ +/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ +#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff) + +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +#define pmd_offset(pudp,addr) \ + (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) + +#define pte_offset_kernel(dir,addr) \ + (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) + +#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_unmap(pte) do { } while(0) +#define pte_unmap_nested(pte) do { } while(0) + +/* to find an entry in a kernel page-table-directory */ +/* This now only contains the vmalloc pages */ +#define pgd_offset_k(address) pgd_offset(&init_mm, address) + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. + */ +static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;} +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;} +static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;} +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} + +static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } +static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } + +static inline pte_t pte_rdprotect(pte_t pte) { + pte_val(pte) &= ~_PAGE_USER; return pte; } +static inline pte_t pte_exprotect(pte_t pte) { + pte_val(pte) &= ~_PAGE_EXEC; return pte; } +static inline pte_t pte_wrprotect(pte_t pte) { + pte_val(pte) &= ~(_PAGE_RW); return pte; } +static inline pte_t pte_mkclean(pte_t pte) { + pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } +static inline pte_t pte_mkold(pte_t pte) { + pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } +static inline pte_t pte_mkread(pte_t pte) { + pte_val(pte) |= _PAGE_USER; return pte; } +static inline pte_t pte_mkexec(pte_t pte) { + pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } +static inline pte_t pte_mkwrite(pte_t pte) { + pte_val(pte) |= _PAGE_RW; return pte; } +static inline pte_t pte_mkdirty(pte_t pte) { + pte_val(pte) |= _PAGE_DIRTY; return pte; } +static inline pte_t pte_mkyoung(pte_t pte) { + pte_val(pte) |= _PAGE_ACCESSED; return pte; } +static inline pte_t pte_mkhuge(pte_t pte) { + return pte; } + +/* Atomic PTE updates */ +static inline unsigned long pte_update(pte_t *p, unsigned long clr) +{ + unsigned long old, tmp; + + __asm__ __volatile__( + "1: ldarx %0,0,%3 # pte_update\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + andc %1,%0,%4 \n\ + stdcx. %1,0,%3 \n\ + bne- 1b" + : "=&r" (old), "=&r" (tmp), "=m" (*p) + : "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY) + : "cc" ); + return old; +} + +/* PTE updating functions, this function puts the PTE in the + * batch, doesn't actually triggers the hash flush immediately, + * you need to call flush_tlb_pending() to do that. 
+ * Pass -1 for "normal" size (4K or 64K) + */ +extern void hpte_update(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pte, int huge); + +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + return 0; + old = pte_update(ptep, _PAGE_ACCESSED); + if (old & _PAGE_HASHPTE) { + hpte_update(mm, addr, ptep, old, 0); + flush_tlb_pending(); + } + return (old & _PAGE_ACCESSED) != 0; +} +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) + +/* + * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the + * moment we always flush but we need to fix hpte_update and test if the + * optimisation is worth it. + */ +static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + + if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) + return 0; + old = pte_update(ptep, _PAGE_DIRTY); + if (old & _PAGE_HASHPTE) + hpte_update(mm, addr, ptep, old, 0); + return (old & _PAGE_DIRTY) != 0; +} +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY +#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + unsigned long old; + + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + old = pte_update(ptep, _PAGE_RW); + if (old & _PAGE_HASHPTE) + hpte_update(mm, addr, ptep, old, 0); +} + +/* + * We currently remove entries from the hashtable regardless of whether + * the entry was young or dirty. The generic routines only flush if the + * entry was young or dirty which is not good enough. + * + * We should be more intelligent about this but for the moment we override + * these functions and force a tlb flush unconditionally + */ +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young(__vma, __address, __ptep) \ +({ \ + int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ + __ptep); \ + __young; \ +}) + +#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH +#define ptep_clear_flush_dirty(__vma, __address, __ptep) \ +({ \ + int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ + __ptep); \ + flush_tlb_page(__vma, __address); \ + __dirty; \ +}) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old = pte_update(ptep, ~0UL); + + if (old & _PAGE_HASHPTE) + hpte_update(mm, addr, ptep, old, 0); + return __pte(old); +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t * ptep) +{ + unsigned long old = pte_update(ptep, ~0UL); + + if (old & _PAGE_HASHPTE) + hpte_update(mm, addr, ptep, old, 0); +} + +/* + * set_pte stores a linux PTE into the linux page table. 
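+ * (set_pte_at() below first clears and hash-flushes any previous
+ * mapping; on a 64K-page kernel whose hardware lacks 64K support it
+ * also marks the PTE as a 4K-subpage "combo" page via _PAGE_COMBO.)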
+ */ +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + if (pte_present(*ptep)) { + pte_clear(mm, addr, ptep); + flush_tlb_pending(); + } + pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); + +#ifdef CONFIG_PPC_64K_PAGES + if (mmu_virtual_psize != MMU_PAGE_64K) + pte = __pte(pte_val(pte) | _PAGE_COMBO); +#endif /* CONFIG_PPC_64K_PAGES */ + + *ptep = pte; +} + +/* Set the dirty and/or accessed bits atomically in a linux PTE, this + * function doesn't need to flush the hash entry + */ +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) +{ + unsigned long bits = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + unsigned long old, tmp; + + __asm__ __volatile__( + "1: ldarx %0,0,%4\n\ + andi. %1,%0,%6\n\ + bne- 1b \n\ + or %0,%3,%0\n\ + stdcx. %0,0,%4\n\ + bne- 1b" + :"=&r" (old), "=&r" (tmp), "=m" (*ptep) + :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) + :"cc"); +} +#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ + do { \ + __ptep_set_access_flags(__ptep, __entry, __dirty); \ + flush_tlb_page_nohash(__vma, __address); \ + } while(0) + +/* + * Macro to mark a page protection value as "uncacheable". + */ +#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + +#define __HAVE_ARCH_PTE_SAME +#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) + +#define pte_ERROR(e) \ + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +extern pgd_t swapper_pg_dir[]; + +extern void paging_init(void); + +#ifdef CONFIG_HUGETLB_PAGE +#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ + free_pgd_range(tlb, addr, end, floor, ceiling) +#endif + +/* + * This gets called at the end of handling a page fault, when + * the kernel has put a new PTE into the page table for the process. + * We use it to put a corresponding HPTE into the hash table + * ahead of time, instead of waiting for the inevitable extra + * hash-table miss exception. + */ +struct vm_area_struct; +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); + +/* Encode and de-code a swap entry */ +#define __swp_type(entry) (((entry).val >> 1) & 0x3f) +#define __swp_offset(entry) ((entry).val >> 8) +#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)}) +#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT}) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT }) +#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT) +#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) +#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) + +/* + * kern_addr_valid is intended to indicate whether an address is a valid + * kernel address. Most 32-bit archs define it as always true (like this) + * but most 64-bit archs actually perform a test. What should we do here? 
+ * The only use is in fs/ncpfs/dir.c + */ +#define kern_addr_valid(addr) (1) + +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + +void pgtable_cache_init(void); + +/* + * find_linux_pte returns the address of a linux pte for a given + * effective address and directory. If not found, it returns zero. + */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) +{ + pgd_t *pg; + pud_t *pu; + pmd_t *pm; + pte_t *pt = NULL; + + pg = pgdir + pgd_index(ea); + if (!pgd_none(*pg)) { + pu = pud_offset(pg, ea); + if (!pud_none(*pu)) { + pm = pmd_offset(pu, ea); + if (pmd_present(*pm)) + pt = pte_offset_kernel(pm, ea); + } + } + return pt; +} + +#include + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_PPC64 */ +#endif /* _ASM_POWERPC_PGTABLE_H */ diff --git a/include/asm-ppc64/io.h b/include/asm-ppc64/io.h deleted file mode 100644 index 77fc07c3c6b..00000000000 --- a/include/asm-ppc64/io.h +++ /dev/null @@ -1,458 +0,0 @@ -#ifndef _PPC64_IO_H -#define _PPC64_IO_H - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#ifdef CONFIG_PPC_ISERIES -#include -#endif -#include -#include - -#include - -#define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 __iomem *)(p), (a), (c)) -#define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 __iomem *)(p), (a), (c)) -#define __ide_mm_outsw(p, a, c) _outsw_ns((volatile u16 __iomem *)(p), (a), (c)) -#define __ide_mm_outsl(p, a, c) _outsl_ns((volatile u32 __iomem *)(p), (a), (c)) - - -#define SIO_CONFIG_RA 0x398 -#define SIO_CONFIG_RD 0x399 - -#define SLOW_DOWN_IO - -extern unsigned long isa_io_base; -extern unsigned long pci_io_base; -extern unsigned long io_page_mask; - -#define MAX_ISA_PORT 0x10000 - -#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) \ - & io_page_mask) - -#ifdef CONFIG_PPC_ISERIES -/* __raw_* accessors aren't supported on iSeries */ -#define __raw_readb(addr) { BUG(); 0; } -#define __raw_readw(addr) { BUG(); 0; } -#define __raw_readl(addr) { BUG(); 0; } -#define __raw_readq(addr) { BUG(); 0; } -#define __raw_writeb(v, addr) { BUG(); 0; } -#define __raw_writew(v, addr) { BUG(); 0; } -#define __raw_writel(v, addr) { BUG(); 0; } -#define __raw_writeq(v, addr) { BUG(); 0; } -#define readb(addr) iSeries_Read_Byte(addr) -#define readw(addr) iSeries_Read_Word(addr) -#define readl(addr) iSeries_Read_Long(addr) -#define writeb(data, addr) iSeries_Write_Byte((data),(addr)) -#define writew(data, addr) iSeries_Write_Word((data),(addr)) -#define writel(data, addr) iSeries_Write_Long((data),(addr)) -#define memset_io(a,b,c) iSeries_memset_io((a),(b),(c)) -#define memcpy_fromio(a,b,c) iSeries_memcpy_fromio((a), (b), (c)) -#define memcpy_toio(a,b,c) iSeries_memcpy_toio((a), (b), (c)) - -#define inb(addr) readb(((void __iomem *)(long)(addr))) -#define inw(addr) readw(((void __iomem *)(long)(addr))) -#define inl(addr) readl(((void __iomem *)(long)(addr))) -#define outb(data,addr) writeb(data,((void __iomem *)(long)(addr))) -#define outw(data,addr) writew(data,((void __iomem *)(long)(addr))) -#define outl(data,addr) writel(data,((void __iomem *)(long)(addr))) -/* - * The *_ns versions below don't do byte-swapping. - * Neither do the standard versions now, these are just here - * for older code. 
- */ -#define insw_ns(port, buf, ns) _insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) -#define insl_ns(port, buf, nl) _insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) -#else - -static inline unsigned char __raw_readb(const volatile void __iomem *addr) -{ - return *(volatile unsigned char __force *)addr; -} -static inline unsigned short __raw_readw(const volatile void __iomem *addr) -{ - return *(volatile unsigned short __force *)addr; -} -static inline unsigned int __raw_readl(const volatile void __iomem *addr) -{ - return *(volatile unsigned int __force *)addr; -} -static inline unsigned long __raw_readq(const volatile void __iomem *addr) -{ - return *(volatile unsigned long __force *)addr; -} -static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr) -{ - *(volatile unsigned char __force *)addr = v; -} -static inline void __raw_writew(unsigned short v, volatile void __iomem *addr) -{ - *(volatile unsigned short __force *)addr = v; -} -static inline void __raw_writel(unsigned int v, volatile void __iomem *addr) -{ - *(volatile unsigned int __force *)addr = v; -} -static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr) -{ - *(volatile unsigned long __force *)addr = v; -} -#define readb(addr) eeh_readb(addr) -#define readw(addr) eeh_readw(addr) -#define readl(addr) eeh_readl(addr) -#define readq(addr) eeh_readq(addr) -#define writeb(data, addr) eeh_writeb((data), (addr)) -#define writew(data, addr) eeh_writew((data), (addr)) -#define writel(data, addr) eeh_writel((data), (addr)) -#define writeq(data, addr) eeh_writeq((data), (addr)) -#define memset_io(a,b,c) eeh_memset_io((a),(b),(c)) -#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(b),(c)) -#define memcpy_toio(a,b,c) eeh_memcpy_toio((a),(b),(c)) -#define inb(port) eeh_inb((unsigned long)port) -#define outb(val, port) eeh_outb(val, (unsigned long)port) -#define inw(port) eeh_inw((unsigned long)port) -#define outw(val, port) eeh_outw(val, (unsigned long)port) -#define inl(port) eeh_inl((unsigned long)port) -#define outl(val, port) eeh_outl(val, (unsigned long)port) - -/* - * The insw/outsw/insl/outsl macros don't do byte-swapping. - * They are only used in practice for transferring buffers which - * are arrays of bytes, and byte-swapping is not appropriate in - * that case. 
- paulus */ -#define insb(port, buf, ns) eeh_insb((port), (buf), (ns)) -#define insw(port, buf, ns) eeh_insw_ns((port), (buf), (ns)) -#define insl(port, buf, nl) eeh_insl_ns((port), (buf), (nl)) -#define insw_ns(port, buf, ns) eeh_insw_ns((port), (buf), (ns)) -#define insl_ns(port, buf, nl) eeh_insl_ns((port), (buf), (nl)) - -#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns)) -#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) -#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) - -#endif - -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) -#define readq_relaxed(addr) readq(addr) - -extern void _insb(volatile u8 __iomem *port, void *buf, int ns); -extern void _outsb(volatile u8 __iomem *port, const void *buf, int ns); -extern void _insw(volatile u16 __iomem *port, void *buf, int ns); -extern void _outsw(volatile u16 __iomem *port, const void *buf, int ns); -extern void _insl(volatile u32 __iomem *port, void *buf, int nl); -extern void _outsl(volatile u32 __iomem *port, const void *buf, int nl); -extern void _insw_ns(volatile u16 __iomem *port, void *buf, int ns); -extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns); -extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl); -extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl); - -#define mmiowb() - -/* - * output pause versions need a delay at least for the - * w83c105 ide controller in a p610. - */ -#define inb_p(port) inb(port) -#define outb_p(val, port) (udelay(1), outb((val), (port))) -#define inw_p(port) inw(port) -#define outw_p(val, port) (udelay(1), outw((val), (port))) -#define inl_p(port) inl(port) -#define outl_p(val, port) (udelay(1), outl((val), (port))) - -/* - * The *_ns versions below don't do byte-swapping. - * Neither do the standard versions now, these are just here - * for older code. - */ -#define outsw_ns(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns)) -#define outsl_ns(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl)) - - -#define IO_SPACE_LIMIT ~(0UL) - - -#ifdef __KERNEL__ -extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr, - unsigned long size, unsigned long flags); -extern void __iomem *__ioremap(unsigned long address, unsigned long size, - unsigned long flags); - -/** - * ioremap - map bus memory into CPU space - * @address: bus address of the memory - * @size: size of the resource to map - * - * ioremap performs a platform specific sequence of operations to - * make bus memory CPU accessible via the readb/readw/readl/writeb/ - * writew/writel functions and the other mmio helpers. The returned - * address is not guaranteed to be usable directly as a virtual - * address. - */ -extern void __iomem *ioremap(unsigned long address, unsigned long size); - -#define ioremap_nocache(addr, size) ioremap((addr), (size)) -extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size); -extern void iounmap(volatile void __iomem *addr); -extern void __iomem * reserve_phb_iospace(unsigned long size); - -/** - * virt_to_phys - map virtual addresses to physical - * @address: address to remap - * - * The returned physical address is the physical (CPU) mapping for - * the memory address given. It is only valid to use this function on - * addresses directly mapped or allocated via kmalloc. 
- * - * This function does not give bus mappings for DMA transfers. In - * almost all conceivable cases a device driver should not be using - * this function - */ -static inline unsigned long virt_to_phys(volatile void * address) -{ - return __pa((unsigned long)address); -} - -/** - * phys_to_virt - map physical address to virtual - * @address: address to remap - * - * The returned virtual address is a current CPU mapping for - * the memory address given. It is only valid to use this function on - * addresses that have a kernel mapping - * - * This function does not handle bus mappings for DMA transfers. In - * almost all conceivable cases a device driver should not be using - * this function - */ -static inline void * phys_to_virt(unsigned long address) -{ - return (void *)__va(address); -} - -/* - * Change "struct page" to physical address. - */ -#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) - -/* We do NOT want virtual merging, it would put too much pressure on - * our iommu allocator. Instead, we want drivers to be smart enough - * to coalesce sglists that happen to have been mapped in a contiguous - * way by the iommu - */ -#define BIO_VMERGE_BOUNDARY 0 - -#endif /* __KERNEL__ */ - -static inline void iosync(void) -{ - __asm__ __volatile__ ("sync" : : : "memory"); -} - -/* Enforce in-order execution of data I/O. - * No distinction between read/write on PPC; use eieio for all three. - */ -#define iobarrier_rw() eieio() -#define iobarrier_r() eieio() -#define iobarrier_w() eieio() - -/* - * 8, 16 and 32 bit, big and little endian I/O operations, with barrier. - * These routines do not perform EEH-related I/O address translation, - * and should not be used directly by device drivers. Use inb/readb - * instead. - */ -static inline int in_8(const volatile unsigned char __iomem *addr) -{ - int ret; - - __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync" - : "=r" (ret) : "m" (*addr)); - return ret; -} - -static inline void out_8(volatile unsigned char __iomem *addr, int val) -{ - __asm__ __volatile__("stb%U0%X0 %1,%0; sync" - : "=m" (*addr) : "r" (val)); -} - -static inline int in_le16(const volatile unsigned short __iomem *addr) -{ - int ret; - - __asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync" - : "=r" (ret) : "r" (addr), "m" (*addr)); - return ret; -} - -static inline int in_be16(const volatile unsigned short __iomem *addr) -{ - int ret; - - __asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync" - : "=r" (ret) : "m" (*addr)); - return ret; -} - -static inline void out_le16(volatile unsigned short __iomem *addr, int val) -{ - __asm__ __volatile__("sthbrx %1,0,%2; sync" - : "=m" (*addr) : "r" (val), "r" (addr)); -} - -static inline void out_be16(volatile unsigned short __iomem *addr, int val) -{ - __asm__ __volatile__("sth%U0%X0 %1,%0; sync" - : "=m" (*addr) : "r" (val)); -} - -static inline unsigned in_le32(const volatile unsigned __iomem *addr) -{ - unsigned ret; - - __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync" - : "=r" (ret) : "r" (addr), "m" (*addr)); - return ret; -} - -static inline unsigned in_be32(const volatile unsigned __iomem *addr) -{ - unsigned ret; - - __asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync" - : "=r" (ret) : "m" (*addr)); - return ret; -} - -static inline void out_le32(volatile unsigned __iomem *addr, int val) -{ - __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr) - : "r" (val), "r" (addr)); -} - -static inline void out_be32(volatile unsigned __iomem *addr, int val) -{ - __asm__ __volatile__("stw%U0%X0 %1,%0; 
sync" - : "=m" (*addr) : "r" (val)); -} - -static inline unsigned long in_le64(const volatile unsigned long __iomem *addr) -{ - unsigned long tmp, ret; - - __asm__ __volatile__( - "ld %1,0(%2)\n" - "twi 0,%1,0\n" - "isync\n" - "rldimi %0,%1,5*8,1*8\n" - "rldimi %0,%1,3*8,2*8\n" - "rldimi %0,%1,1*8,3*8\n" - "rldimi %0,%1,7*8,4*8\n" - "rldicl %1,%1,32,0\n" - "rlwimi %0,%1,8,8,31\n" - "rlwimi %0,%1,24,16,23\n" - : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr)); - return ret; -} - -static inline unsigned long in_be64(const volatile unsigned long __iomem *addr) -{ - unsigned long ret; - - __asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync" - : "=r" (ret) : "m" (*addr)); - return ret; -} - -static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long val) -{ - unsigned long tmp; - - __asm__ __volatile__( - "rldimi %0,%1,5*8,1*8\n" - "rldimi %0,%1,3*8,2*8\n" - "rldimi %0,%1,1*8,3*8\n" - "rldimi %0,%1,7*8,4*8\n" - "rldicl %1,%1,32,0\n" - "rlwimi %0,%1,8,8,31\n" - "rlwimi %0,%1,24,16,23\n" - "std %0,0(%3)\n" - "sync" - : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr)); -} - -static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val) -{ - __asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val)); -} - -#ifndef CONFIG_PPC_ISERIES -#include -#endif - -#ifdef __KERNEL__ - -/** - * check_signature - find BIOS signatures - * @io_addr: mmio address to check - * @signature: signature block - * @length: length of signature - * - * Perform a signature comparison with the mmio address io_addr. This - * address should have been obtained by ioremap. - * Returns 1 on a match. - */ -static inline int check_signature(const volatile void __iomem * io_addr, - const unsigned char *signature, int length) -{ - int retval = 0; -#ifndef CONFIG_PPC_ISERIES - do { - if (readb(io_addr) != *signature) - goto out; - io_addr++; - signature++; - length--; - } while (length); - retval = 1; -out: -#endif - return retval; -} - -/* Nothing to do */ - -#define dma_cache_inv(_start,_size) do { } while (0) -#define dma_cache_wback(_start,_size) do { } while (0) -#define dma_cache_wback_inv(_start,_size) do { } while (0) - -/* Check of existence of legacy devices */ -extern int check_legacy_ioport(unsigned long base_port); - - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -#endif /* __KERNEL__ */ - -#endif /* _PPC64_IO_H */ diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h deleted file mode 100644 index 1a7e0afa2dc..00000000000 --- a/include/asm-ppc64/mmu.h +++ /dev/null @@ -1,395 +0,0 @@ -/* - * PowerPC memory management structures - * - * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com> - * PPC64 rework. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */
-
-#ifndef _PPC64_MMU_H_
-#define _PPC64_MMU_H_
-
-#include 
-#include 
-#include 
-
-/*
- * Segment table
- */
-
-#define STE_ESID_V	0x80
-#define STE_ESID_KS	0x20
-#define STE_ESID_KP	0x10
-#define STE_ESID_N	0x08
-
-#define STE_VSID_SHIFT	12
-
-/* Location of cpu0's segment table */
-#define STAB0_PAGE	0x6
-#define STAB0_PHYS_ADDR	(STAB0_PAGE<<12)
-
-#ifndef __ASSEMBLY__
-extern char initial_stab[];
-#endif /* ! __ASSEMBLY__ */
-
-/*
- * SLB
- */
-
-#define SLB_NUM_BOLTED	3
-#define SLB_CACHE_ENTRIES	8
-
-/* Bits in the SLB ESID word */
-#define SLB_ESID_V	ASM_CONST(0x0000000008000000) /* valid */
-
-/* Bits in the SLB VSID word */
-#define SLB_VSID_SHIFT	12
-#define SLB_VSID_B	ASM_CONST(0xc000000000000000)
-#define SLB_VSID_B_256M	ASM_CONST(0x0000000000000000)
-#define SLB_VSID_B_1T	ASM_CONST(0x4000000000000000)
-#define SLB_VSID_KS	ASM_CONST(0x0000000000000800)
-#define SLB_VSID_KP	ASM_CONST(0x0000000000000400)
-#define SLB_VSID_N	ASM_CONST(0x0000000000000200) /* no-execute */
-#define SLB_VSID_L	ASM_CONST(0x0000000000000100)
-#define SLB_VSID_C	ASM_CONST(0x0000000000000080) /* class */
-#define SLB_VSID_LP	ASM_CONST(0x0000000000000030)
-#define SLB_VSID_LP_00	ASM_CONST(0x0000000000000000)
-#define SLB_VSID_LP_01	ASM_CONST(0x0000000000000010)
-#define SLB_VSID_LP_10	ASM_CONST(0x0000000000000020)
-#define SLB_VSID_LP_11	ASM_CONST(0x0000000000000030)
-#define SLB_VSID_LLP	(SLB_VSID_L|SLB_VSID_LP)
-
-#define SLB_VSID_KERNEL	(SLB_VSID_KP)
-#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
-
-#define SLBIE_C		(0x08000000)
-
-/*
- * Hash table
- */
-
-#define HPTES_PER_GROUP	8
-
-#define HPTE_V_AVPN_SHIFT	7
-#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
-#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
-#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
-#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
-#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
-#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
-#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
-#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
-
-#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
-#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
-#define HPTE_R_RPN_SHIFT	12
-#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
-#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
-#define HPTE_R_N		ASM_CONST(0x0000000000000004)
-
-/* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
-#define PP_RWXX	0	/* Supervisor read/write, User none */
-#define PP_RWRX	1	/* Supervisor read/write, User read */
-#define PP_RWRW	2	/* Supervisor read/write, User read/write */
-#define PP_RXRX	3	/* Supervisor read, User read */
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long v;
-	unsigned long r;
-} hpte_t;
-
-extern hpte_t *htab_address;
-extern unsigned long htab_hash_mask;
-
-/*
- * Page size definition
- *
- *    shift : is the "PAGE_SHIFT" value for that page size
- *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
- *            directly to a slbmte "vsid" value
- *    penc  : is the HPTE encoding mask for the "LP" field:
- *
- */
-struct mmu_psize_def
-{
-	unsigned int	shift;	/* number of bits */
-	unsigned int	penc;	/* HPTE encoding */
-	unsigned int	tlbiel;	/* tlbiel supported for that page size */
-	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
-	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
-};
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * The kernel uses the constants below to index into the page sizes array.
- * The use of fixed constants for this purpose is better for the
- * performance of the low-level hash refill handlers.
- *
- * An unsupported page size has its "shift" field set to 0.
- *
- * Any new page size being implemented can get a new entry in here. Whether
- * the kernel will use it or not is a different matter though. The actual page
- * size used by hugetlbfs is not defined here and may be made variable.
- */
-
-#define MMU_PAGE_4K	0	/* 4K */
-#define MMU_PAGE_64K	1	/* 64K */
-#define MMU_PAGE_64K_AP	2	/* 64K Admixed (in a 4K segment) */
-#define MMU_PAGE_1M	3	/* 1M */
-#define MMU_PAGE_16M	4	/* 16M */
-#define MMU_PAGE_16G	5	/* 16G */
-#define MMU_PAGE_COUNT	6
-
-#ifndef __ASSEMBLY__
-
-/*
- * The current system page sizes
- */
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-extern int mmu_linear_psize;
-extern int mmu_virtual_psize;
-
-#ifdef CONFIG_HUGETLB_PAGE
-/*
- * The page size index of the huge pages for use by hugetlbfs
- */
-extern int mmu_huge_psize;
-
-#endif /* CONFIG_HUGETLB_PAGE */
-
-/*
- * This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
- */
-static inline unsigned long hpte_encode_v(unsigned long va, int psize)
-{
-	unsigned long v;
-
-	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
-	v <<= HPTE_V_AVPN_SHIFT;
-	if (psize != MMU_PAGE_4K)
-		v |= HPTE_V_LARGE;
-	return v;
-}
-
-/*
- * This function sets the ARPN and LP fields of the HPTE appropriately
- * for the page size. We assume the pa is already "clean", that is,
- * properly aligned for the requested page size.
- */
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
-{
-	/* A 4K page needs no special encoding */
-	if (psize == MMU_PAGE_4K)
-		return pa & HPTE_R_RPN;
-	else {
-		unsigned int penc = mmu_psize_defs[psize].penc;
-		unsigned int shift = mmu_psize_defs[psize].shift;
-		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
-	}
-}
-
-/*
- * This hashes a virtual address for a 256Mb segment only for now
- */
-
-static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
-{
-	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
-}
-
-extern int __hash_page_4K(unsigned long ea, unsigned long access,
-			  unsigned long vsid, pte_t *ptep, unsigned long trap,
-			  unsigned int local);
-extern int __hash_page_64K(unsigned long ea, unsigned long access,
-			   unsigned long vsid, pte_t *ptep, unsigned long trap,
-			   unsigned int local);
-struct mm_struct;
-extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local);
-
-extern void htab_finish_init(void);
-extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-			     unsigned long pstart, unsigned long mode,
-			     int psize);
-
-extern void htab_initialize(void);
-extern void htab_initialize_secondary(void);
-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-extern void mm_init_ppc64(void);
-
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-				     unsigned long va, unsigned long prpn,
-				     unsigned long rflags,
-				     unsigned long vflags, int psize);
-
-extern long native_hpte_insert(unsigned long hpte_group,
-			       unsigned long va, unsigned long prpn,
-			       unsigned long rflags,
-			       unsigned long vflags, int psize);
-
-extern long iSeries_hpte_insert(unsigned long hpte_group,
-				unsigned long va, unsigned long prpn,
-				unsigned long rflags,
-				unsigned long vflags, int psize);
-
-extern void stabs_alloc(void);
-extern void slb_initialize(void);
-extern void stab_initialize(unsigned long stab);
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * VSID allocation
- *
- * We first generate a 36-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- *	(context << 15) | (esid & 0x7fff)
- *
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
- *
- * The proto-VSIDs are then scrambled into real VSIDs with the
- * multiplicative hash:
- *
- *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- *	where	VSID_MULTIPLIER = 200730139 (a 28-bit prime)
- *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
- *
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved. VSID_MULTIPLIER is prime, so in particular it is
- * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
- * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *	- We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- *	- We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses. i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
- *
- *	- The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
- */
-/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate (slb_low.S), do_stab_bolted
- * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
- */
-
-#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
-#define VSID_BITS	36
-#define VSID_MODULUS	((1UL << VSID_BITS) - 1)
-
-#define CONTEXT_BITS	19
-#define USER_ESID_BITS	16
-
-#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
-
-/*
- * This macro generates asm code to compute the VSID scramble
- * function. Used in slb_allocate() and do_stab_bolted. The function
- * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
- *
- *	rt = register containing the proto-VSID and into which the
- *		VSID will be stored
- *	rx = scratch register (clobbered)
- *
- *	- rt and rx must be different registers
- *	- The answer will end up in the low 36 bits of rt. The higher
- *	  bits may contain other garbage, so you may need to mask the
- *	  result.
- */
-#define ASM_VSID_SCRAMBLE(rt, rx)	\
-	lis	rx,VSID_MULTIPLIER@h;					\
-	ori	rx,rx,VSID_MULTIPLIER@l;				\
-	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
-									\
-	srdi	rx,rt,VSID_BITS;					\
-	clrldi	rt,rt,(64-VSID_BITS);	/* rt &= VSID_MODULUS */	\
-	add	rt,rt,rx;		/* add high and low bits */	\
-	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
-	 * 2^36-1+2^28-1. That in particular means that if r3 >= \
-	 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
-	 * the bit clear, r3 already has the answer we want, if it \
-	 * doesn't, the answer is the low 36 bits of r3+1. So in all \
-	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
-	addi	rx,rt,1;						\
-	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
-	add	rt,rt,rx
-
-
-#ifndef __ASSEMBLY__
-
-typedef unsigned long mm_context_id_t;
-
-typedef struct {
-	mm_context_id_t id;
-#ifdef CONFIG_HUGETLB_PAGE
-	u16 low_htlb_areas, high_htlb_areas;
-#endif
-} mm_context_t;
-
-
-static inline unsigned long vsid_scramble(unsigned long protovsid)
-{
-#if 0
-	/* The code below is equivalent to this function for arguments
-	 * < 2^VSID_BITS, which is all this should ever be called
-	 * with. However gcc is not clever enough to compute the
-	 * modulus (2^n-1) without a second multiply.
*/ - return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); -#else /* 1 */ - unsigned long x; - - x = protovsid * VSID_MULTIPLIER; - x = (x >> VSID_BITS) + (x & VSID_MODULUS); - return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; -#endif /* 1 */ -} - -/* This is only valid for addresses >= KERNELBASE */ -static inline unsigned long get_kernel_vsid(unsigned long ea) -{ - return vsid_scramble(ea >> SID_SHIFT); -} - -/* This is only valid for user addresses (which are below 2^41) */ -static inline unsigned long get_vsid(unsigned long context, unsigned long ea) -{ - return vsid_scramble((context << USER_ESID_BITS) - | (ea >> SID_SHIFT)); -} - -#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) -#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) - -#endif /* __ASSEMBLY */ - -#endif /* _PPC64_MMU_H_ */ diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h deleted file mode 100644 index 4f512e9fa6b..00000000000 --- a/include/asm-ppc64/mmu_context.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef __PPC64_MMU_CONTEXT_H -#define __PPC64_MMU_CONTEXT_H - -#include -#include -#include -#include -#include - -/* - * Copyright (C) 2001 PPC 64 Team, IBM Corp - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -/* - * Getting into a kernel thread, there is no valid user segment, mark - * paca->pgdir NULL so that SLB miss on user addresses will fault - */ -static inline void enter_lazy_tlb(struct mm_struct *mm, - struct task_struct *tsk) -{ -#ifdef CONFIG_PPC_64K_PAGES - get_paca()->pgdir = NULL; -#endif /* CONFIG_PPC_64K_PAGES */ -} - -#define NO_CONTEXT 0 -#define MAX_CONTEXT (0x100000-1) - -extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm); -extern void destroy_context(struct mm_struct *mm); - -extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm); -extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm); - -/* - * switch_mm is the entry point called from the architecture independent - * code in kernel/sched.c - */ -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) -{ - if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask)) - cpu_set(smp_processor_id(), next->cpu_vm_mask); - - /* No need to flush userspace segments if the mm doesnt change */ -#ifdef CONFIG_PPC_64K_PAGES - if (prev == next && get_paca()->pgdir == next->pgd) - return; -#else - if (prev == next) - return; -#endif /* CONFIG_PPC_64K_PAGES */ - -#ifdef CONFIG_ALTIVEC - if (cpu_has_feature(CPU_FTR_ALTIVEC)) - asm volatile ("dssall"); -#endif /* CONFIG_ALTIVEC */ - - if (cpu_has_feature(CPU_FTR_SLB)) - switch_slb(tsk, next); - else - switch_stab(tsk, next); -} - -#define deactivate_mm(tsk,mm) do { } while (0) - -/* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. 
- */ -static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) -{ - unsigned long flags; - - local_irq_save(flags); - switch_mm(prev, next, current); - local_irq_restore(flags); -} - -#endif /* __PPC64_MMU_CONTEXT_H */ diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h deleted file mode 100644 index 54958d6cae0..00000000000 --- a/include/asm-ppc64/mmzone.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 - * - * PowerPC64 port: - * Copyright (C) 2002 Anton Blanchard, IBM Corp. - */ -#ifndef _ASM_MMZONE_H_ -#define _ASM_MMZONE_H_ - -#include - -/* - * generic non-linear memory support: - * - * 1) we will not split memory into more chunks than will fit into the - * flags field of the struct page - */ - -#ifdef CONFIG_NEED_MULTIPLE_NODES - -extern struct pglist_data *node_data[]; -/* - * Return a pointer to the node data for node n. - */ -#define NODE_DATA(nid) (node_data[nid]) - -/* - * Following are specific to this numa platform. - */ - -extern int numa_cpu_lookup_table[]; -extern cpumask_t numa_cpumask_lookup_table[]; -#ifdef CONFIG_MEMORY_HOTPLUG -extern unsigned long max_pfn; -#endif - -/* - * Following are macros that each numa implmentation must define. - */ - -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) - -#endif /* CONFIG_NEED_MULTIPLE_NODES */ - -#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID -extern int __init early_pfn_to_nid(unsigned long pfn); -#endif - -#endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h deleted file mode 100644 index cf04327a597..00000000000 --- a/include/asm-ppc64/pci-bridge.h +++ /dev/null @@ -1,151 +0,0 @@ -#ifdef __KERNEL__ -#ifndef _ASM_PCI_BRIDGE_H -#define _ASM_PCI_BRIDGE_H - -#include -#include -#include - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -/* - * Structure of a PCI controller (host bridge) - */ -struct pci_controller { - struct pci_bus *bus; - char is_dynamic; - void *arch_data; - struct list_head list_node; - - int first_busno; - int last_busno; - - void __iomem *io_base_virt; - unsigned long io_base_phys; - - /* Some machines have a non 1:1 mapping of - * the PCI memory space in the CPU bus space - */ - unsigned long pci_mem_offset; - unsigned long pci_io_size; - - struct pci_ops *ops; - volatile unsigned int __iomem *cfg_addr; - volatile void __iomem *cfg_data; - - /* Currently, we limit ourselves to 1 IO range and 3 mem - * ranges since the common pci_bus structure can't handle more - */ - struct resource io_resource; - struct resource mem_resources[3]; - int global_number; - int local_number; - unsigned long buid; - unsigned long dma_window_base_cur; - unsigned long dma_window_size; -}; - -/* - * PCI stuff, for nodes representing PCI devices, pointed to - * by device_node->data. - */ -struct pci_controller; -struct iommu_table; - -struct pci_dn { - int busno; /* for pci devices */ - int bussubno; /* for pci devices */ - int devfn; /* for pci devices */ - -#ifdef CONFIG_PPC_PSERIES - int eeh_mode; /* See eeh.h for possible EEH_MODEs */ - int eeh_config_addr; - int eeh_check_count; /* # times driver ignored error */ - int eeh_freeze_count; /* # times this device froze up. 
*/ - int eeh_is_bridge; /* device is pci-to-pci bridge */ -#endif - int pci_ext_config_space; /* for pci devices */ - struct pci_controller *phb; /* for pci devices */ - struct iommu_table *iommu_table; /* for phb's or bridges */ - struct pci_dev *pcidev; /* back-pointer to the pci device */ - struct device_node *node; /* back-pointer to the device_node */ -#ifdef CONFIG_PPC_ISERIES - struct list_head Device_List; - int Irq; /* Assigned IRQ */ - int Flags; /* Possible flags(disable/bist)*/ - u8 LogicalSlot; /* Hv Slot Index for Tces */ -#endif - u32 config_space[16]; /* saved PCI config space */ -}; - -/* Get the pointer to a device_node's pci_dn */ -#define PCI_DN(dn) ((struct pci_dn *) (dn)->data) - -struct device_node *fetch_dev_dn(struct pci_dev *dev); - -/* Get a device_node from a pci_dev. This code must be fast except - * in the case where the sysdata is incorrect and needs to be fixed - * up (this will only happen once). - * In this case the sysdata will have been inherited from a PCI host - * bridge or a PCI-PCI bridge further up the tree, so it will point - * to a valid struct pci_dn, just not the one we want. - */ -static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) -{ - struct device_node *dn = dev->sysdata; - struct pci_dn *pdn = dn->data; - - if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number) - return dn; /* fast path. sysdata is good */ - return fetch_dev_dn(dev); -} - -static inline int pci_device_from_OF_node(struct device_node *np, - u8 *bus, u8 *devfn) -{ - if (!PCI_DN(np)) - return -ENODEV; - *bus = PCI_DN(np)->busno; - *devfn = PCI_DN(np)->devfn; - return 0; -} - -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - if (bus->self) - return pci_device_to_OF_node(bus->self); - else - return bus->sysdata; /* Must be root bus (PHB) */ -} - -extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, - struct device_node *dev, int primary); - -extern int pcibios_remove_root_bus(struct pci_controller *phb); - -extern void phbs_remap_io(void); - -static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) -{ - struct device_node *busdn = bus->sysdata; - - BUG_ON(busdn == NULL); - return PCI_DN(busdn)->phb; -} - -extern struct pci_controller * -pcibios_alloc_controller(struct device_node *dev); -extern void pcibios_free_controller(struct pci_controller *phb); - -/* Return values for ppc_md.pci_probe_mode function */ -#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */ -#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */ -#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */ - -#endif -#endif /* __KERNEL__ */ diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h deleted file mode 100644 index dcf3622d194..00000000000 --- a/include/asm-ppc64/pgalloc.h +++ /dev/null @@ -1,151 +0,0 @@ -#ifndef _PPC64_PGALLOC_H -#define _PPC64_PGALLOC_H - -#include -#include -#include -#include - -extern kmem_cache_t *pgtable_cache[]; - -#ifdef CONFIG_PPC_64K_PAGES -#define PTE_CACHE_NUM 0 -#define PMD_CACHE_NUM 1 -#define PGD_CACHE_NUM 2 -#else -#define PTE_CACHE_NUM 0 -#define PMD_CACHE_NUM 1 -#define PUD_CACHE_NUM 1 -#define PGD_CACHE_NUM 0 -#endif - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
-}
-
-static inline void pgd_free(pgd_t *pgd)
-{
-	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
-}
-
-#ifndef CONFIG_PPC_64K_PAGES
-
-#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free(pud_t *pud)
-{
-	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
-}
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-{
-	pud_set(pud, (unsigned long)pmd);
-}
-
-#define pmd_populate(mm, pmd, pte_page) \
-	pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
-
-
-#else /* CONFIG_PPC_64K_PAGES */
-
-#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
-				       pte_t *pte)
-{
-	pmd_set(pmd, (unsigned long)pte);
-}
-
-#define pmd_populate(mm, pmd, pte_page) \
-	pmd_populate_kernel(mm, pmd, page_address(pte_page))
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pmd_free(pmd_t *pmd)
-{
-	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
-				GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
-					 unsigned long address)
-{
-	return virt_to_page(pte_alloc_one_kernel(mm, address));
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
-}
-
-static inline void pte_free(struct page *ptepage)
-{
-	pte_free_kernel(page_address(ptepage));
-}
-
-#define PGF_CACHENUM_MASK	0xf
-
-typedef struct pgtable_free {
-	unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
-						unsigned long mask)
-{
-	BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
-	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
-static inline void pgtable_free(pgtable_free_t pgf)
-{
-	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
-	int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
-	kmem_cache_free(pgtable_cache[cachenum], p);
-}
-
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb, ptepage) \
-	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
-		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
-#define __pmd_free_tlb(tlb, pmd) \
-	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
-		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
-#ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud) \
-	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
-		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#define check_pgt_cache()	do { } while (0)
-
-#endif /* _PPC64_PGALLOC_H */
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-ppc64/pgtable-4k.h
deleted file mode 100644
index e9590c06ad9..00000000000
--- a/include/asm-ppc64/pgtable-4k.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Entries per page directory level. The PTE level must use a 64b record
- * for each page table entry. The PMD and PGD levels use a 32b record for
- * each entry by assuming that each entry is page aligned.
- */
-#define PTE_INDEX_SIZE	9
-#define PMD_INDEX_SIZE	7
-#define PUD_INDEX_SIZE	7
-#define PGD_INDEX_SIZE	9
-
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
-#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT	PMD_SHIFT
-
-/* PUD_SHIFT determines what a third-level page table entry can map */
-#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
-#define PUD_SIZE	(1UL << PUD_SHIFT)
-#define PUD_MASK	(~(PUD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
-#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
-
-/* PTE bits */
-#define _PAGE_SECONDARY	0x8000	/* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX	0x7000	/* software: HPTE index within group */
-#define _PAGE_F_SECOND	_PAGE_SECONDARY
-#define _PAGE_F_GIX	_PAGE_GROUP_IX
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
-			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
-
-/* PAGE_MASK gives the right answer below, but only by accident */
-/* It should be preserving the high 48 bits and then specifically */
-/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_HPTEFLAGS)
-
-/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS	0
-/* Bits to mask out from a PUD to get to the PMD page */
-#define PUD_MASKED_BITS	0
-/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS	0
-
-/* shift to put page number into pte */
-#define PTE_RPN_SHIFT	(17)
-
-#define __real_pte(e,p)		((real_pte_t)(e))
-#define __rpte_to_pte(r)	(r)
-#define __rpte_to_hidx(r,index)	(pte_val((r)) >> 12)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
-	do {							   \
-		index = 0;					   \
-		shift = mmu_psize_defs[psize].shift;		   \
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * 4-level page tables related bits
- */
-
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
-#define pgd_present(pgd)	(pgd_val(pgd) != 0)
-#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
-#define pgd_page(pgd)		(pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-#define pud_offset(pgdp, addr)	\
-	(((pud_t *) pgd_page(*(pgdp))) + \
-	 (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-ppc64/pgtable-64k.h
deleted file mode 100644
index 154f1840ece..00000000000
--- a/include/asm-ppc64/pgtable-64k.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#include 
-
-
-#define PTE_INDEX_SIZE	12
-#define PMD_INDEX_SIZE	12
-#define PUD_INDEX_SIZE	0
-#define PGD_INDEX_SIZE	4
-
-#define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
-
-/* With 64k base page size, hugepage PTEs go at the PTE level */
-#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
-
-/* Additional PTE bits (don't change without checking asm in hash_low.S) */
-#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
-#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
-#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
-#define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
-#define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_HPTE_SUB |\
-			 _PAGE_COMBO)
-
-/* Shift to put page number into pte.
- *
- * That gives us a max RPN of 32 bits, which means a max of 48 bits
- * of addressable physical space.
- * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
- * 32 makes PTEs more readable for debugging for now :)
- */
-#define PTE_RPN_SHIFT	(32)
-#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
-#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes
- */
-#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED)
-
-/* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS	0x1ff
-/* Bits to mask out from a PGD to get to the PMD page */
-#define PGD_MASKED_BITS	0x1ff
-
-#ifndef __ASSEMBLY__
-
-/* Manipulate "rpte" values */
-#define __real_pte(e,p)	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
-#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
-	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
-#define __rpte_to_pte(r)	((r).pte)
-#define __rpte_sub_valid(rpte, index) \
-	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
-
-
-/* Trick: we set __end to va + 64k, which happens to work for
- * a 16M page as well, as we want only one iteration
- */
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
-	do {								\
-		unsigned long __end = va + PAGE_SIZE;			\
-		unsigned __split = (psize == MMU_PAGE_4K ||		\
-				    psize == MMU_PAGE_64K_AP);		\
-		shift = mmu_psize_defs[psize].shift;			\
-		for (index = 0; va < __end; index++, va += (1 << shift)) { \
-			if (!__split || __rpte_sub_valid(rpte, index)) do { \
-
-#define pte_iterate_hashed_end() } while(0); } } while(0)
-
-
-#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
deleted file mode 100644
index dee36c83be1..00000000000
--- a/include/asm-ppc64/pgtable.h
+++ /dev/null
@@ -1,519 +0,0 @@
-#ifndef _PPC64_PGTABLE_H
-#define _PPC64_PGTABLE_H
-
-/*
- * This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
- */
-
-#ifndef __ASSEMBLY__
-#include 
-#include 
-#include  /* For TASK_SIZE */
-#include 
-#include 
-#include 
-struct mm_struct;
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include 
-#else
-#include 
-#endif
-
-#define FIRST_USER_ADDRESS	0
-
-/*
- * Size of EA range mapped by our pagetables.
- */
-#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
-
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
-
-/*
- * Define the address range of the vmalloc VM area.
- */ -#define VMALLOC_START (0xD000000000000000ul) -#define VMALLOC_SIZE (0x80000000000UL) -#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) - -/* - * Define the address range of the imalloc VM area. - */ -#define PHBS_IO_BASE VMALLOC_END -#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ -#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) - -/* - * Common bits in a linux-style PTE. These match the bits in the - * (hardware-defined) PowerPC PTE as closely as possible. Additional - * bits may be defined in pgtable-*.h - */ -#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ -#define _PAGE_USER 0x0002 /* matches one of the PP bits */ -#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ -#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ -#define _PAGE_GUARDED 0x0008 -#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ -#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ -#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ -#define _PAGE_DIRTY 0x0080 /* C: page changed */ -#define _PAGE_ACCESSED 0x0100 /* R: page referenced */ -#define _PAGE_RW 0x0200 /* software: user write access allowed */ -#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ -#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ - -#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) - -#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) - -/* __pgprot defined in asm-ppc64/page.h */ -#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) - -#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC) -#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) -#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) -#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE) -#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ - _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) -#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC) - -#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) -#define HAVE_PAGE_AGP - -/* PTEIDX nibble */ -#define _PTEIDX_SECONDARY 0x8 -#define _PTEIDX_GROUP_IX 0x7 - - -/* - * POWER4 and newer have per page execute protection, older chips can only - * do this on a segment (256MB) basis. - * - * Also, write permissions imply read permissions. - * This is the closest we can get.. - * - * Note due to the way vm flags are laid out, the bits are XWR - */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_X -#define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY_X -#define __P111 PAGE_COPY_X - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_X -#define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED_X -#define __S111 PAGE_SHARED_X - -#ifndef __ASSEMBLY__ - -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. 
- */ -extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) -#endif /* __ASSEMBLY__ */ - -#ifdef CONFIG_HUGETLB_PAGE - -#define HAVE_ARCH_UNMAPPED_AREA -#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN - -#endif - -#ifndef __ASSEMBLY__ - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - * - * mk_pte takes a (struct page *) as input - */ -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - -static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) -{ - pte_t pte; - - - pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot); - return pte; -} - -#define pte_modify(_pte, newprot) \ - (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))) - -#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0) -#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) - -/* pte_clear moved to later in this file */ - -#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) -#define pte_page(x) pfn_to_page(pte_pfn(x)) - -#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) -#define pmd_none(pmd) (!pmd_val(pmd)) -#define pmd_bad(pmd) (pmd_val(pmd) == 0) -#define pmd_present(pmd) (pmd_val(pmd) != 0) -#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) -#define pmd_page_kernel(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) -#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) - -#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) -#define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) ((pud_val(pud)) == 0) -#define pud_present(pud) (pud_val(pud) != 0) -#define pud_clear(pudp) (pud_val(*(pudp)) = 0) -#define pud_page(pud) (pud_val(pud) & ~PUD_MASKED_BITS) - -#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) - -/* - * Find an entry in a page-table-directory. We combine the address region - * (the high order N bits) and the pgd portion of the address. - */ -/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ -#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff) - -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) - -#define pmd_offset(pudp,addr) \ - (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) - -#define pte_offset_kernel(dir,addr) \ - (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) - -#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) -#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) -#define pte_unmap(pte) do { } while(0) -#define pte_unmap_nested(pte) do { } while(0) - -/* to find an entry in a kernel page-table-directory */ -/* This now only contains the vmalloc pages */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. 
- */ -static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;} -static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;} -static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;} -static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} -static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} -static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} - -static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } -static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } - -static inline pte_t pte_rdprotect(pte_t pte) { - pte_val(pte) &= ~_PAGE_USER; return pte; } -static inline pte_t pte_exprotect(pte_t pte) { - pte_val(pte) &= ~_PAGE_EXEC; return pte; } -static inline pte_t pte_wrprotect(pte_t pte) { - pte_val(pte) &= ~(_PAGE_RW); return pte; } -static inline pte_t pte_mkclean(pte_t pte) { - pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } -static inline pte_t pte_mkold(pte_t pte) { - pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkread(pte_t pte) { - pte_val(pte) |= _PAGE_USER; return pte; } -static inline pte_t pte_mkexec(pte_t pte) { - pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { - pte_val(pte) |= _PAGE_RW; return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= _PAGE_DIRTY; return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { - pte_val(pte) |= _PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { - return pte; } - -/* Atomic PTE updates */ -static inline unsigned long pte_update(pte_t *p, unsigned long clr) -{ - unsigned long old, tmp; - - __asm__ __volatile__( - "1: ldarx %0,0,%3 # pte_update\n\ - andi. %1,%0,%6\n\ - bne- 1b \n\ - andc %1,%0,%4 \n\ - stdcx. %1,0,%3 \n\ - bne- 1b" - : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY) - : "cc" ); - return old; -} - -/* PTE updating functions, this function puts the PTE in the - * batch, doesn't actually triggers the hash flush immediately, - * you need to call flush_tlb_pending() to do that. - * Pass -1 for "normal" size (4K or 64K) - */ -extern void hpte_update(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, unsigned long pte, int huge); - -static inline int __ptep_test_and_clear_young(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - unsigned long old; - - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) - return 0; - old = pte_update(ptep, _PAGE_ACCESSED); - if (old & _PAGE_HASHPTE) { - hpte_update(mm, addr, ptep, old, 0); - flush_tlb_pending(); - } - return (old & _PAGE_ACCESSED) != 0; -} -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ -({ \ - int __r; \ - __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ - __r; \ -}) - -/* - * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the - * moment we always flush but we need to fix hpte_update and test if the - * optimisation is worth it. 
- */ -static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - unsigned long old; - - if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) - return 0; - old = pte_update(ptep, _PAGE_DIRTY); - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); - return (old & _PAGE_DIRTY) != 0; -} -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY -#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \ -({ \ - int __r; \ - __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \ - __r; \ -}) - -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) -{ - unsigned long old; - - if ((pte_val(*ptep) & _PAGE_RW) == 0) - return; - old = pte_update(ptep, _PAGE_RW); - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); -} - -/* - * We currently remove entries from the hashtable regardless of whether - * the entry was young or dirty. The generic routines only flush if the - * entry was young or dirty which is not good enough. - * - * We should be more intelligent about this but for the moment we override - * these functions and force a tlb flush unconditionally - */ -#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -#define ptep_clear_flush_young(__vma, __address, __ptep) \ -({ \ - int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ - __ptep); \ - __young; \ -}) - -#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH -#define ptep_clear_flush_dirty(__vma, __address, __ptep) \ -({ \ - int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ - __ptep); \ - flush_tlb_page(__vma, __address); \ - __dirty; \ -}) - -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - unsigned long old = pte_update(ptep, ~0UL); - - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); - return __pte(old); -} - -static inline void pte_clear(struct mm_struct *mm, unsigned long addr, - pte_t * ptep) -{ - unsigned long old = pte_update(ptep, ~0UL); - - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); -} - -/* - * set_pte stores a linux PTE into the linux page table. - */ -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - if (pte_present(*ptep)) { - pte_clear(mm, addr, ptep); - flush_tlb_pending(); - } - pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); - -#ifdef CONFIG_PPC_64K_PAGES - if (mmu_virtual_psize != MMU_PAGE_64K) - pte = __pte(pte_val(pte) | _PAGE_COMBO); -#endif /* CONFIG_PPC_64K_PAGES */ - - *ptep = pte; -} - -/* Set the dirty and/or accessed bits atomically in a linux PTE, this - * function doesn't need to flush the hash entry - */ -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) -{ - unsigned long bits = pte_val(entry) & - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); - unsigned long old, tmp; - - __asm__ __volatile__( - "1: ldarx %0,0,%4\n\ - andi. %1,%0,%6\n\ - bne- 1b \n\ - or %0,%3,%0\n\ - stdcx. %0,0,%4\n\ - bne- 1b" - :"=&r" (old), "=&r" (tmp), "=m" (*ptep) - :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) - :"cc"); -} -#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ - do { \ - __ptep_set_access_flags(__ptep, __entry, __dirty); \ - flush_tlb_page_nohash(__vma, __address); \ - } while(0) - -/* - * Macro to mark a page protection value as "uncacheable". 
- */ -#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) - -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -#define __HAVE_PHYS_MEM_ACCESS_PROT - -#define __HAVE_ARCH_PTE_SAME -#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) - -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) - -extern pgd_t swapper_pg_dir[]; - -extern void paging_init(void); - -#ifdef CONFIG_HUGETLB_PAGE -#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \ - free_pgd_range(tlb, addr, end, floor, ceiling) -#endif - -/* - * This gets called at the end of handling a page fault, when - * the kernel has put a new PTE into the page table for the process. - * We use it to put a corresponding HPTE into the hash table - * ahead of time, instead of waiting for the inevitable extra - * hash-table miss exception. - */ -struct vm_area_struct; -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); - -/* Encode and de-code a swap entry */ -#define __swp_type(entry) (((entry).val >> 1) & 0x3f) -#define __swp_offset(entry) ((entry).val >> 8) -#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)}) -#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT}) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT }) -#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT) -#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) -#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) - -/* - * kern_addr_valid is intended to indicate whether an address is a valid - * kernel address. Most 32-bit archs define it as always true (like this) - * but most 64-bit archs actually perform a test. What should we do here? - * The only use is in fs/ncpfs/dir.c - */ -#define kern_addr_valid(addr) (1) - -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - -void pgtable_cache_init(void); - -/* - * find_linux_pte returns the address of a linux pte for a given - * effective address and directory. If not found, it returns zero. - */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) -{ - pgd_t *pg; - pud_t *pu; - pmd_t *pm; - pte_t *pt = NULL; - - pg = pgdir + pgd_index(ea); - if (!pgd_none(*pg)) { - pu = pud_offset(pg, ea); - if (!pud_none(*pu)) { - pm = pmd_offset(pu, ea); - if (pmd_present(*pm)) - pt = pte_offset_kernel(pm, ea); - } - } - return pt; -} - -#include - -#endif /* __ASSEMBLY__ */ - -#endif /* _PPC64_PGTABLE_H */ -- cgit v1.2.3 From f8ef2705969e0409efedeb889445da67806ba9ea Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 19 Nov 2005 20:46:04 +1100 Subject: powerpc: Merge pci.h This involves some minor changes: a few unused functions that the ppc32 pci.c provides are no longer declared here or exported; pcibios_assign_all_busses now just refers to the pci_assign_all_buses variable on both 32-bit and 64-bit; pcibios_scan_all_fns is now just 0 instead of a function that always returns 0 on 64-bit. 
Signed-off-by: Paul Mackerras --- include/asm-powerpc/pci.h | 247 ++++++++++++++++++++++++++++++++++++++++++ include/asm-powerpc/ppc-pci.h | 2 - include/asm-ppc64/pci.h | 193 --------------------------------- 3 files changed, 247 insertions(+), 195 deletions(-) create mode 100644 include/asm-powerpc/pci.h delete mode 100644 include/asm-ppc64/pci.h (limited to 'include') diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h new file mode 100644 index 00000000000..d5934a076bd --- /dev/null +++ b/include/asm-powerpc/pci.h @@ -0,0 +1,247 @@ +#ifndef __ASM_POWERPC_PCI_H +#define __ASM_POWERPC_PCI_H +#ifdef __KERNEL__ + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +struct pci_dev; + +/* Values for the `which' argument to sys_pciconfig_iobase syscall. */ +#define IOBASE_BRIDGE_NUMBER 0 +#define IOBASE_MEMORY 1 +#define IOBASE_IO 2 +#define IOBASE_ISA_IO 3 +#define IOBASE_ISA_MEM 4 + +/* + * Set this to 1 if you want the kernel to re-assign all PCI + * bus numbers + */ +extern int pci_assign_all_buses; +#define pcibios_assign_all_busses() (pci_assign_all_buses) + +#define pcibios_scan_all_fns(a, b) 0 + +static inline void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +static inline void pcibios_penalize_isa_irq(int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + if (ppc_md.pci_get_legacy_ide_irq) + return ppc_md.pci_get_legacy_ide_irq(dev, channel); + return channel ? 15 : 14; +} + +#ifdef CONFIG_PPC64 +#define HAVE_ARCH_PCI_MWI 1 +static inline int pcibios_prep_mwi(struct pci_dev *dev) +{ + /* + * We would like to avoid touching the cacheline size or MWI bit + * but we cant do that with the current pcibios_prep_mwi + * interface. pSeries firmware sets the cacheline size (which is not + * the cpu cacheline size in all cases) and hardware treats MWI + * the same as memory write. So we dont touch the cacheline size + * here and allow the generic code to set the MWI bit. + */ + return 0; +} + +extern struct dma_mapping_ops pci_dma_ops; + +/* For DAC DMA, we currently don't support it by default, but + * we let 64-bit platforms override this. 
+ */ +static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) +{ + if (pci_dma_ops.dac_dma_supported) + return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask); + return 0; +} + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + unsigned long cacheline_size; + u8 byte; + + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + *strat = PCI_DMA_BURST_MULTIPLE; + *strategy_parameter = cacheline_size; +} +#endif + +extern int pci_domain_nr(struct pci_bus *bus); + +/* Decide whether to display the domain number in /proc */ +extern int pci_proc_domain(struct pci_bus *bus); + +#else /* 32-bit */ + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + *strat = PCI_DMA_BURST_INFINITY; + *strategy_parameter = ~0UL; +} +#endif + +/* + * At present there are very few 32-bit PPC machines that can have + * memory above the 4GB point, and we don't support that. + */ +#define pci_dac_dma_supported(pci_dev, mask) (0) + +/* Return the index of the PCI controller for device PDEV. */ +#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index + +/* Set the name of the bus as it appears in /proc/bus/pci */ +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return 0; +} + +#endif /* CONFIG_PPC64 */ + +struct vm_area_struct; +/* Map a range of PCI memory or I/O space for a device into user space */ +int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); + +/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ +#define HAVE_PCI_MMAP 1 + +#ifdef CONFIG_PPC64 +/* pci_unmap_{single,page} is not a nop, thus... */ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) + +/* The PCI address space does not equal the physical memory address + * space (we have an IOMMU). The IDE and SCSI device layers use + * this boolean for bounce buffer decisions. + */ +#define PCI_DMA_BUS_IS_PHYS (0) + +#else /* 32-bit */ + +/* The PCI address space does equal the physical memory + * address space (no IOMMU). The IDE and SCSI device layers use + * this boolean for bounce buffer decisions. + */ +#define PCI_DMA_BUS_IS_PHYS (1) + +/* pci_unmap_{page,single} is a nop so... 
*/ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) +#define pci_unmap_addr(PTR, ADDR_NAME) (0) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +#define pci_unmap_len(PTR, LEN_NAME) (0) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) + +#endif /* CONFIG_PPC64 */ + +extern void pcibios_resource_to_bus(struct pci_dev *dev, + struct pci_bus_region *region, + struct resource *res); + +extern void pcibios_bus_to_resource(struct pci_dev *dev, + struct resource *res, + struct pci_bus_region *region); + +static inline struct resource *pcibios_select_root(struct pci_dev *pdev, + struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + +extern int unmap_bus_range(struct pci_bus *bus); + +extern int remap_bus_range(struct pci_bus *bus); + +extern void pcibios_fixup_device_resources(struct pci_dev *dev, + struct pci_bus *bus); + +extern struct pci_controller *init_phb_dynamic(struct device_node *dn); + +extern struct pci_dev *of_create_pci_dev(struct device_node *node, + struct pci_bus *bus, int devfn); + +extern void of_scan_pci_bridge(struct device_node *node, + struct pci_dev *dev); + +extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); + +extern int pci_read_irq_line(struct pci_dev *dev); + +extern void pcibios_add_platform_entries(struct pci_dev *dev); + +struct file; +extern pgprot_t pci_phys_mem_access_prot(struct file *file, + unsigned long pfn, + unsigned long size, + pgprot_t prot); + +#if defined(CONFIG_PPC_MULTIPLATFORM) || defined(CONFIG_PPC32) +#define HAVE_ARCH_PCI_RESOURCE_TO_USER +extern void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + u64 *start, u64 *end); +#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */ + +#endif /* __KERNEL__ */ +#endif /* __ASM_POWERPC_PCI_H */ diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index 2e36e5a7f4f..36cdc869e58 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h @@ -48,8 +48,6 @@ extern void pSeries_final_fixup(void); extern void pSeries_irq_bus_setup(struct pci_bus *bus); extern unsigned long pci_probe_only; -extern unsigned long pci_assign_all_buses; -extern int pci_read_irq_line(struct pci_dev *pci_dev); /* ---- EEH internal-use-only related routines ---- */ #ifdef CONFIG_EEH diff --git a/include/asm-ppc64/pci.h b/include/asm-ppc64/pci.h deleted file mode 100644 index fafdf885a3c..00000000000 --- a/include/asm-ppc64/pci.h +++ /dev/null @@ -1,193 +0,0 @@ -#ifndef __PPC64_PCI_H -#define __PPC64_PCI_H -#ifdef __KERNEL__ - -/* - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -#define PCIBIOS_MIN_IO 0x1000 -#define PCIBIOS_MIN_MEM 0x10000000 - -struct pci_dev; - -#ifdef CONFIG_PPC_ISERIES -#define pcibios_scan_all_fns(a, b) 0 -#else -extern int pcibios_scan_all_fns(struct pci_bus *bus, int devfn); -#endif - -static inline void pcibios_set_master(struct pci_dev *dev) -{ - /* No special bus mastering setup handling */ -} - -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - -#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - if (ppc_md.pci_get_legacy_ide_irq) - return ppc_md.pci_get_legacy_ide_irq(dev, channel); - return channel ? 15 : 14; -} - -#define HAVE_ARCH_PCI_MWI 1 -static inline int pcibios_prep_mwi(struct pci_dev *dev) -{ - /* - * We would like to avoid touching the cacheline size or MWI bit - * but we cant do that with the current pcibios_prep_mwi - * interface. pSeries firmware sets the cacheline size (which is not - * the cpu cacheline size in all cases) and hardware treats MWI - * the same as memory write. So we dont touch the cacheline size - * here and allow the generic code to set the MWI bit. - */ - return 0; -} - -extern unsigned int pcibios_assign_all_busses(void); - -extern struct dma_mapping_ops pci_dma_ops; - -/* For DAC DMA, we currently don't support it by default, but - * we let the platform override this - */ -static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) -{ - if (pci_dma_ops.dac_dma_supported) - return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask); - return 0; -} - -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - unsigned long cacheline_size; - u8 byte; - - pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); - if (byte == 0) - cacheline_size = 1024; - else - cacheline_size = (int) byte * 4; - - *strat = PCI_DMA_BURST_MULTIPLE; - *strategy_parameter = cacheline_size; -} -#endif - -extern int pci_domain_nr(struct pci_bus *bus); - -/* Decide whether to display the domain number in /proc */ -extern int pci_proc_domain(struct pci_bus *bus); - -struct vm_area_struct; -/* Map a range of PCI memory or I/O space for a device into user space */ -int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); - -/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ -#define HAVE_PCI_MMAP 1 - -/* pci_unmap_{single,page} is not a nop, thus... */ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ - dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ - __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) \ - ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ - (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) \ - ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ - (((PTR)->LEN_NAME) = (VAL)) - -/* The PCI address space does equal the physical memory - * address space. The networking and block device layers use - * this boolean for bounce buffer decisions. 
- */ -#define PCI_DMA_BUS_IS_PHYS (0) - -extern void -pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, - struct resource *res); - -extern void -pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, - struct pci_bus_region *region); - -static inline struct resource * -pcibios_select_root(struct pci_dev *pdev, struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - -extern int -unmap_bus_range(struct pci_bus *bus); - -extern int -remap_bus_range(struct pci_bus *bus); - -extern void -pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus); - -extern struct pci_controller *init_phb_dynamic(struct device_node *dn); - -extern struct pci_dev *of_create_pci_dev(struct device_node *node, - struct pci_bus *bus, int devfn); - -extern void of_scan_pci_bridge(struct device_node *node, - struct pci_dev *dev); - -extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); - -extern int pci_read_irq_line(struct pci_dev *dev); - -extern void pcibios_add_platform_entries(struct pci_dev *dev); - -struct file; -extern pgprot_t pci_phys_mem_access_prot(struct file *file, - unsigned long pfn, - unsigned long size, - pgprot_t prot); - -#ifdef CONFIG_PPC_MULTIPLATFORM -#define HAVE_ARCH_PCI_RESOURCE_TO_USER -extern void pci_resource_to_user(const struct pci_dev *dev, int bar, - const struct resource *rsrc, - u64 *start, u64 *end); -#endif /* CONFIG_PPC_MULTIPLATFORM */ - - -#endif /* __KERNEL__ */ - -#endif /* __PPC64_PCI_H */ -- cgit v1.2.3 From 21a6290220679d94912a068c75db2c5cd9c6552a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 19 Nov 2005 20:47:22 +1100 Subject: powerpc: move include/asm-ppc64/ptrace-common.h to arch/powerpc/kernel It's only used by arch/powerpc/kernel/ptrace{,32}.c. Signed-off-by: Paul Mackerras --- include/asm-ppc64/ptrace-common.h | 164 -------------------------------------- 1 file changed, 164 deletions(-) delete mode 100644 include/asm-ppc64/ptrace-common.h (limited to 'include') diff --git a/include/asm-ppc64/ptrace-common.h b/include/asm-ppc64/ptrace-common.h deleted file mode 100644 index b1babb72967..00000000000 --- a/include/asm-ppc64/ptrace-common.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * linux/arch/ppc64/kernel/ptrace-common.h - * - * Copyright (c) 2002 Stephen Rothwell, IBM Coproration - * Extracted from ptrace.c and ptrace32.c - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file README.legal in the main directory of - * this archive for more details. - */ - -#ifndef _PPC64_PTRACE_COMMON_H -#define _PPC64_PTRACE_COMMON_H - -#include -#include - -/* - * Set of msr bits that gdb can change on behalf of a process. - */ -#define MSR_DEBUGCHANGE (MSR_FE0 | MSR_SE | MSR_BE | MSR_FE1) - -/* - * Get contents of register REGNO in task TASK. - */ -static inline unsigned long get_reg(struct task_struct *task, int regno) -{ - unsigned long tmp = 0; - - /* - * Put the correct FP bits in, they might be wrong as a result - * of our lazy FP restore. - */ - if (regno == PT_MSR) { - tmp = ((unsigned long *)task->thread.regs)[PT_MSR]; - tmp |= task->thread.fpexc_mode; - } else if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) { - tmp = ((unsigned long *)task->thread.regs)[regno]; - } - - return tmp; -} - -/* - * Write contents of register REGNO in task TASK. 
- */ -static inline int put_reg(struct task_struct *task, int regno, - unsigned long data) -{ - if (regno < PT_SOFTE) { - if (regno == PT_MSR) - data = (data & MSR_DEBUGCHANGE) - | (task->thread.regs->msr & ~MSR_DEBUGCHANGE); - ((unsigned long *)task->thread.regs)[regno] = data; - return 0; - } - return -EIO; -} - -static inline void set_single_step(struct task_struct *task) -{ - struct pt_regs *regs = task->thread.regs; - if (regs != NULL) - regs->msr |= MSR_SE; - set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP); -} - -static inline void clear_single_step(struct task_struct *task) -{ - struct pt_regs *regs = task->thread.regs; - if (regs != NULL) - regs->msr &= ~MSR_SE; - clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP); -} - -#ifdef CONFIG_ALTIVEC -/* - * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go. - * The transfer totals 34 quadword. Quadwords 0-31 contain the - * corresponding vector registers. Quadword 32 contains the vscr as the - * last word (offset 12) within that quadword. Quadword 33 contains the - * vrsave as the first word (offset 0) within the quadword. - * - * This definition of the VMX state is compatible with the current PPC32 - * ptrace interface. This allows signal handling and ptrace to use the - * same structures. This also simplifies the implementation of a bi-arch - * (combined (32- and 64-bit) gdb. - */ - -/* - * Get contents of AltiVec register state in task TASK - */ -static inline int get_vrregs(unsigned long __user *data, - struct task_struct *task) -{ - unsigned long regsize; - - /* copy AltiVec registers VR[0] .. VR[31] */ - regsize = 32 * sizeof(vector128); - if (copy_to_user(data, task->thread.vr, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VSCR */ - regsize = 1 * sizeof(vector128); - if (copy_to_user(data, &task->thread.vscr, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VRSAVE */ - if (put_user(task->thread.vrsave, (u32 __user *)data)) - return -EFAULT; - - return 0; -} - -/* - * Write contents of AltiVec register state into task TASK. - */ -static inline int set_vrregs(struct task_struct *task, - unsigned long __user *data) -{ - unsigned long regsize; - - /* copy AltiVec registers VR[0] .. VR[31] */ - regsize = 32 * sizeof(vector128); - if (copy_from_user(task->thread.vr, data, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VSCR */ - regsize = 1 * sizeof(vector128); - if (copy_from_user(&task->thread.vscr, data, regsize)) - return -EFAULT; - data += (regsize / sizeof(unsigned long)); - - /* copy VRSAVE */ - if (get_user(task->thread.vrsave, (u32 __user *)data)) - return -EFAULT; - - return 0; -} -#endif - -static inline int ptrace_set_debugreg(struct task_struct *task, - unsigned long addr, unsigned long data) -{ - /* We only support one DABR and no IABRS at the moment */ - if (addr > 0) - return -EINVAL; - - /* The bottom 3 bits are flags */ - if ((data & ~0x7UL) >= TASK_SIZE) - return -EIO; - - /* Ensure translation is on */ - if (data && !(data & DABR_TRANSLATION)) - return -EIO; - - task->thread.dabr = data; - return 0; -} - -#endif /* _PPC64_PTRACE_COMMON_H */ -- cgit v1.2.3 From 0212ddd839470f7a54cccccbaecd4833b4123da2 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 19 Nov 2005 20:50:46 +1100 Subject: powerpc: Merge spinlock.h The result is mostly similar to the original ppc64 version but with some adaptations for 32-bit compilation. include/asm-ppc64 is now empty! 
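The locking scheme the merged spinlock.h relies on can be summarised with a small, architecture-neutral sketch. This is not the kernel code: the real implementation uses lwarx/stwcx. assembly and takes the token from the paca, and this_cpu_id()/yield_to_cpu() below are hypothetical stand-ins for smp_processor_id() and the hypervisor confer call.

/*
 * Sketch: the lock word holds 0 when free, or 0x80000000 | cpu when
 * held, so a spinning virtual CPU can tell the hypervisor which CPU
 * to donate its timeslice to.  A GCC __sync builtin stands in for
 * the lwarx/stwcx. loop.
 */
extern unsigned int this_cpu_id(void);		/* hypothetical helper */
extern void yield_to_cpu(unsigned int cpu);	/* hypothetical helper */

static unsigned int sketch_trylock(volatile unsigned int *slock)
{
	unsigned int token = 0x80000000u | this_cpu_id();

	/* Returns the old lock value; 0 means we took the lock. */
	return __sync_val_compare_and_swap(slock, 0u, token);
}

static void sketch_lock(volatile unsigned int *slock)
{
	unsigned int old;

	while ((old = sketch_trylock(slock)) != 0)
		yield_to_cpu(old & 0x7fffffffu);	/* low bits name the holder */
}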
Signed-off-by: Paul Mackerras --- include/asm-powerpc/spinlock.h | 269 +++++++++++++++++++++++++++++++++++++++++ include/asm-ppc64/spinlock.h | 241 ------------------------------------ 2 files changed, 269 insertions(+), 241 deletions(-) create mode 100644 include/asm-powerpc/spinlock.h delete mode 100644 include/asm-ppc64/spinlock.h (limited to 'include') diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h new file mode 100644 index 00000000000..caa4b14e0e9 --- /dev/null +++ b/include/asm-powerpc/spinlock.h @@ -0,0 +1,269 @@ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +/* + * Simple spin lock operations. + * + * Copyright (C) 2001-2004 Paul Mackerras , IBM + * Copyright (C) 2001 Anton Blanchard , IBM + * Copyright (C) 2002 Dave Engebretsen , IBM + * Rework to support virtual processors + * + * Type of int is used as a full 64b word is not necessary. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * (the type definitions are in asm/spinlock_types.h) + */ +#ifdef CONFIG_PPC64 +#include +#include +#include +#endif +#include +#include + +#define __raw_spin_is_locked(x) ((x)->slock != 0) + +#ifdef CONFIG_PPC64 +/* use 0x800000yy when locked, where yy == CPU number */ +#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) +#else +#define LOCK_TOKEN 1 +#endif + +/* + * This returns the old value in the lock, so we succeeded + * in getting the lock if the return value is 0. + */ +static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) +{ + unsigned long tmp, token; + + token = LOCK_TOKEN; + __asm__ __volatile__( +"1: lwarx %0,0,%2 # __spin_trylock\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. %1,0,%2\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp) + : "r" (token), "r" (&lock->slock) + : "cr0", "memory"); + + return tmp; +} + +static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) +{ + return __spin_trylock(lock) == 0; +} + +/* + * On a system with shared processors (that is, where a physical + * processor is multiplexed between several virtual processors), + * there is no point spinning on a lock if the holder of the lock + * isn't currently scheduled on a physical processor. Instead + * we detect this situation and ask the hypervisor to give the + * rest of our timeslice to the lock holder. + * + * So that we can tell which virtual processor is holding a lock, + * we put 0x80000000 | smp_processor_id() in the lock when it is + * held. Conveniently, we have a word in the paca that holds this + * value. 
+ */ + +#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) +/* We only yield to the hypervisor if we are in shared processor mode */ +#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) +extern void __spin_yield(raw_spinlock_t *lock); +extern void __rw_yield(raw_rwlock_t *lock); +#else /* SPLPAR || ISERIES */ +#define __spin_yield(x) barrier() +#define __rw_yield(x) barrier() +#define SHARED_PROCESSOR 0 +#endif + +static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) +{ + while (1) { + if (likely(__spin_trylock(lock) == 0)) + break; + do { + HMT_low(); + if (SHARED_PROCESSOR) + __spin_yield(lock); + } while (unlikely(lock->slock != 0)); + HMT_medium(); + } +} + +static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +{ + unsigned long flags_dis; + + while (1) { + if (likely(__spin_trylock(lock) == 0)) + break; + local_save_flags(flags_dis); + local_irq_restore(flags); + do { + HMT_low(); + if (SHARED_PROCESSOR) + __spin_yield(lock); + } while (unlikely(lock->slock != 0)); + HMT_medium(); + local_irq_restore(flags_dis); + } +} + +static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) +{ + __asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock" + : : :"memory"); + lock->slock = 0; +} + +#ifdef CONFIG_PPC64 +extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); +#else +#define __raw_spin_unlock_wait(lock) \ + do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#endif + +/* + * Read-write spinlocks, allowing multiple readers + * but only one writer. + * + * NOTE! it is quite common to have readers in interrupts + * but no interrupt writers. For those circumstances we + * can "mix" irq-safe locks - any writer needs to get a + * irq-safe write-lock, but readers can get non-irqsafe + * read-locks. + */ + +#define __raw_read_can_lock(rw) ((rw)->lock >= 0) +#define __raw_write_can_lock(rw) (!(rw)->lock) + +#ifdef CONFIG_PPC64 +#define __DO_SIGN_EXTEND "extsw %0,%0\n" +#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */ +#else +#define __DO_SIGN_EXTEND +#define WRLOCK_TOKEN (-1) +#endif + +/* + * This returns the old value in the lock + 1, + * so we got a read lock if the return value is > 0. + */ +static long __inline__ __read_trylock(raw_rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( +"1: lwarx %0,0,%1 # read_trylock\n" + __DO_SIGN_EXTEND +" addic. %0,%0,1\n\ + ble- 2f\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp) + : "r" (&rw->lock) + : "cr0", "xer", "memory"); + + return tmp; +} + +/* + * This returns the old value in the lock, + * so we got the write lock if the return value is 0. + */ +static __inline__ long __write_trylock(raw_rwlock_t *rw) +{ + long tmp, token; + + token = WRLOCK_TOKEN; + __asm__ __volatile__( +"1: lwarx %0,0,%2 # write_trylock\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n" + PPC405_ERR77(0,%1) +" stwcx. 
%1,0,%2\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp) + : "r" (token), "r" (&rw->lock) + : "cr0", "memory"); + + return tmp; +} + +static void __inline__ __raw_read_lock(raw_rwlock_t *rw) +{ + while (1) { + if (likely(__read_trylock(rw) > 0)) + break; + do { + HMT_low(); + if (SHARED_PROCESSOR) + __rw_yield(rw); + } while (unlikely(rw->lock < 0)); + HMT_medium(); + } +} + +static void __inline__ __raw_write_lock(raw_rwlock_t *rw) +{ + while (1) { + if (likely(__write_trylock(rw) == 0)) + break; + do { + HMT_low(); + if (SHARED_PROCESSOR) + __rw_yield(rw); + } while (unlikely(rw->lock != 0)); + HMT_medium(); + } +} + +static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) +{ + return __read_trylock(rw) > 0; +} + +static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) +{ + return __write_trylock(rw) == 0; +} + +static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( + "eieio # read_unlock\n\ +1: lwarx %0,0,%1\n\ + addic %0,%0,-1\n" + PPC405_ERR77(0,%1) +" stwcx. %0,0,%1\n\ + bne- 1b" + : "=&r"(tmp) + : "r"(&rw->lock) + : "cr0", "memory"); +} + +static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) +{ + __asm__ __volatile__(SYNC_ON_SMP" # write_unlock" + : : :"memory"); + rw->lock = 0; +} + +#endif /* __ASM_SPINLOCK_H */ diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h deleted file mode 100644 index 7d84fb5e39f..00000000000 --- a/include/asm-ppc64/spinlock.h +++ /dev/null @@ -1,241 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -/* - * Simple spin lock operations. - * - * Copyright (C) 2001-2004 Paul Mackerras , IBM - * Copyright (C) 2001 Anton Blanchard , IBM - * Copyright (C) 2002 Dave Engebretsen , IBM - * Rework to support virtual processors - * - * Type of int is used as a full 64b word is not necessary. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * (the type definitions are in asm/spinlock_types.h) - */ -#include -#include -#include -#include - -#define __raw_spin_is_locked(x) ((x)->slock != 0) - -/* - * This returns the old value in the lock, so we succeeded - * in getting the lock if the return value is 0. - */ -static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) -{ - unsigned long tmp, tmp2; - - __asm__ __volatile__( -" lwz %1,%3(13) # __spin_trylock\n\ -1: lwarx %0,0,%2\n\ - cmpwi 0,%0,0\n\ - bne- 2f\n\ - stwcx. %1,0,%2\n\ - bne- 1b\n\ - isync\n\ -2:" : "=&r" (tmp), "=&r" (tmp2) - : "r" (&lock->slock), "i" (offsetof(struct paca_struct, lock_token)) - : "cr0", "memory"); - - return tmp; -} - -static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) -{ - return __spin_trylock(lock) == 0; -} - -/* - * On a system with shared processors (that is, where a physical - * processor is multiplexed between several virtual processors), - * there is no point spinning on a lock if the holder of the lock - * isn't currently scheduled on a physical processor. Instead - * we detect this situation and ask the hypervisor to give the - * rest of our timeslice to the lock holder. - * - * So that we can tell which virtual processor is holding a lock, - * we put 0x80000000 | smp_processor_id() in the lock when it is - * held. Conveniently, we have a word in the paca that holds this - * value. 
- */ - -#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) -/* We only yield to the hypervisor if we are in shared processor mode */ -#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc) -extern void __spin_yield(raw_spinlock_t *lock); -extern void __rw_yield(raw_rwlock_t *lock); -#else /* SPLPAR || ISERIES */ -#define __spin_yield(x) barrier() -#define __rw_yield(x) barrier() -#define SHARED_PROCESSOR 0 -#endif - -static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) -{ - while (1) { - if (likely(__spin_trylock(lock) == 0)) - break; - do { - HMT_low(); - if (SHARED_PROCESSOR) - __spin_yield(lock); - } while (unlikely(lock->slock != 0)); - HMT_medium(); - } -} - -static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) -{ - unsigned long flags_dis; - - while (1) { - if (likely(__spin_trylock(lock) == 0)) - break; - local_save_flags(flags_dis); - local_irq_restore(flags); - do { - HMT_low(); - if (SHARED_PROCESSOR) - __spin_yield(lock); - } while (unlikely(lock->slock != 0)); - HMT_medium(); - local_irq_restore(flags_dis); - } -} - -static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) -{ - __asm__ __volatile__("lwsync # __raw_spin_unlock": : :"memory"); - lock->slock = 0; -} - -extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. - * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - */ - -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) -#define __raw_write_can_lock(rw) (!(rw)->lock) - -/* - * This returns the old value in the lock + 1, - * so we got a read lock if the return value is > 0. - */ -static long __inline__ __read_trylock(raw_rwlock_t *rw) -{ - long tmp; - - __asm__ __volatile__( -"1: lwarx %0,0,%1 # read_trylock\n\ - extsw %0,%0\n\ - addic. %0,%0,1\n\ - ble- 2f\n\ - stwcx. %0,0,%1\n\ - bne- 1b\n\ - isync\n\ -2:" : "=&r" (tmp) - : "r" (&rw->lock) - : "cr0", "xer", "memory"); - - return tmp; -} - -/* - * This returns the old value in the lock, - * so we got the write lock if the return value is 0. - */ -static __inline__ long __write_trylock(raw_rwlock_t *rw) -{ - long tmp, tmp2; - - __asm__ __volatile__( -" lwz %1,%3(13) # write_trylock\n\ -1: lwarx %0,0,%2\n\ - cmpwi 0,%0,0\n\ - bne- 2f\n\ - stwcx. %1,0,%2\n\ - bne- 1b\n\ - isync\n\ -2:" : "=&r" (tmp), "=&r" (tmp2) - : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token)) - : "cr0", "memory"); - - return tmp; -} - -static void __inline__ __raw_read_lock(raw_rwlock_t *rw) -{ - while (1) { - if (likely(__read_trylock(rw) > 0)) - break; - do { - HMT_low(); - if (SHARED_PROCESSOR) - __rw_yield(rw); - } while (unlikely(rw->lock < 0)); - HMT_medium(); - } -} - -static void __inline__ __raw_write_lock(raw_rwlock_t *rw) -{ - while (1) { - if (likely(__write_trylock(rw) == 0)) - break; - do { - HMT_low(); - if (SHARED_PROCESSOR) - __rw_yield(rw); - } while (unlikely(rw->lock != 0)); - HMT_medium(); - } -} - -static int __inline__ __raw_read_trylock(raw_rwlock_t *rw) -{ - return __read_trylock(rw) > 0; -} - -static int __inline__ __raw_write_trylock(raw_rwlock_t *rw) -{ - return __write_trylock(rw) == 0; -} - -static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) -{ - long tmp; - - __asm__ __volatile__( - "eieio # read_unlock\n\ -1: lwarx %0,0,%1\n\ - addic %0,%0,-1\n\ - stwcx. 
%0,0,%1\n\ - bne- 1b" - : "=&r"(tmp) - : "r"(&rw->lock) - : "cr0", "memory"); -} - -static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) -{ - __asm__ __volatile__("lwsync # write_unlock": : :"memory"); - rw->lock = 0; -} - -#endif /* __ASM_SPINLOCK_H */ -- cgit v1.2.3 From b63d4f0fb80918ab37b6c0ee1adcd49e05c9994c Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 19 Nov 2005 11:10:35 +0000 Subject: [SERIAL] Fix status reporting with PL011 serial driver The receiver status register reports latched error conditions, which must be cleared by writing to it. However, the data register reports unlatched conditions which are associated with the current character. Use the data register to interpret error status rather than the RSR. Signed-off-by: Russell King --- include/asm-arm/hardware/amba_serial.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-arm/hardware/amba_serial.h b/include/asm-arm/hardware/amba_serial.h index 71770aa6389..dc726ffcceb 100644 --- a/include/asm-arm/hardware/amba_serial.h +++ b/include/asm-arm/hardware/amba_serial.h @@ -50,6 +50,11 @@ #define UART011_ICR 0x44 /* Interrupt clear register. */ #define UART011_DMACR 0x48 /* DMA control register. */ +#define UART011_DR_OE (1 << 11) +#define UART011_DR_BE (1 << 10) +#define UART011_DR_PE (1 << 9) +#define UART011_DR_FE (1 << 8) + #define UART01x_RSR_OE 0x08 #define UART01x_RSR_BE 0x04 #define UART01x_RSR_PE 0x02 -- cgit v1.2.3 From 4f1d774aadfc5a6ed1545dca180f66ab6d0f543d Mon Sep 17 00:00:00 2001 From: Mathias Kretschmer Date: Sat, 19 Nov 2005 21:32:38 +0100 Subject: [PATCH] via82cxxx: add VIA VT6410 IDE support From: Mathias Kretschmer Signed-off-by: Daniel Drake Acked-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/pci_ids.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7b387faedb4..e34d96db571 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1234,6 +1234,7 @@ #define PCI_DEVICE_ID_VIA_8703_51_0 0x3148 #define PCI_DEVICE_ID_VIA_8237_SATA 0x3149 #define PCI_DEVICE_ID_VIA_XN266 0x3156 +#define PCI_DEVICE_ID_VIA_6410 0x3164 #define PCI_DEVICE_ID_VIA_8754C_0 0x3168 #define PCI_DEVICE_ID_VIA_8235 0x3177 #define PCI_DEVICE_ID_VIA_8385_0 0x3188 -- cgit v1.2.3 From 14351f8e573442e2437d4b177fa10075aaefd5c9 Mon Sep 17 00:00:00 2001 From: Aurelien Jarno Date: Sat, 19 Nov 2005 21:43:45 +0100 Subject: [PATCH] sis5513: enable ATA133 for the SiS965 southbridge Signed-off-by: Aurelien Jarno Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/pci_ids.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index e34d96db571..efb60d06caa 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -620,6 +620,7 @@ #define PCI_DEVICE_ID_SI_961 0x0961 #define PCI_DEVICE_ID_SI_962 0x0962 #define PCI_DEVICE_ID_SI_963 0x0963 +#define PCI_DEVICE_ID_SI_965 0x0965 #define PCI_DEVICE_ID_SI_5511 0x5511 #define PCI_DEVICE_ID_SI_5513 0x5513 #define PCI_DEVICE_ID_SI_5518 0x5518 -- cgit v1.2.3 From 84e7b9e94b474d40582090b7e09139f0029cff2c Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sat, 19 Nov 2005 21:54:04 +0100 Subject: [PATCH] ide: remove duplicate documentation for ide_do_drive_cmd() Remove duplicate documentation for ide_do_drive_cmd() from <linux/ide.h>; this function is already documented in ide-io.c.
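For context on ide_do_drive_cmd() itself, a minimal caller sketch follows, assuming the 2.6.15-era interface (ide_init_drive_cmd() and the rq.buffer convention); see the retained kerneldoc in drivers/ide/ide-io.c for the authoritative semantics of each ide_action_t value.

#include <linux/ide.h>

/* Sketch only: issue a drive command and sleep until it completes,
 * as an ioctl handler would (ide_wait queues the request at the
 * tail of the queue and waits for it).
 */
static int example_issue_cmd(ide_drive_t *drive, u8 *args)
{
	struct request rq;

	ide_init_drive_cmd(&rq);	/* set up a REQ_DRIVE_CMD request */
	rq.buffer = (char *)args;	/* command block / result buffer */

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}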
Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/ide.h | 25 ------------------------- 1 file changed, 25 deletions(-) (limited to 'include') diff --git a/include/linux/ide.h b/include/linux/ide.h index e99019057ba..e74ee4f7fcd 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1207,31 +1207,6 @@ typedef enum { ide_end /* insert rq at end of list, but don't wait for it */ } ide_action_t; -/* - * This function issues a special IDE device request - * onto the request queue. - * - * If action is ide_wait, then the rq is queued at the end of the - * request queue, and the function sleeps until it has been processed. - * This is for use when invoked from an ioctl handler. - * - * If action is ide_preempt, then the rq is queued at the head of - * the request queue, displacing the currently-being-processed - * request and this function returns immediately without waiting - * for the new rq to be completed. This is VERY DANGEROUS, and is - * intended for careful use by the ATAPI tape/cdrom driver code. - * - * If action is ide_next, then the rq is queued immediately after - * the currently-being-processed-request (if any), and the function - * returns without waiting for the new rq to be completed. As above, - * This is VERY DANGEROUS, and is intended for careful use by the - * ATAPI tape/cdrom driver code. - * - * If action is ide_end, then the rq is queued at the end of the - * request queue, and the function returns immediately without waiting - * for the new rq to be completed. This is again intended for careful - * use by the ATAPI tape/cdrom driver code. - */ extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t); /* -- cgit v1.2.3 From 071ffcc0f7dd8df871f443be3f5059f05da528e2 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sat, 19 Nov 2005 22:01:35 +0100 Subject: [PATCH] ide: remove unused ide_action_t:ide_next Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/ide.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include') diff --git a/include/linux/ide.h b/include/linux/ide.h index e74ee4f7fcd..a39c3c59789 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -1201,7 +1201,6 @@ extern u64 ide_get_error_location(ide_drive_t *, char *); */ typedef enum { ide_wait, /* insert rq at end of list, and wait for it */ - ide_next, /* insert rq immediately after current request */ ide_preempt, /* insert rq in front of current request */ ide_head_wait, /* insert rq in front of current request and wait for it */ ide_end /* insert rq at end of list, but don't wait for it */ -- cgit v1.2.3 From e07bc7096424b977e53a16d72ec02645389107ba Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Sat, 19 Nov 2005 22:17:55 +0100 Subject: [PATCH] ide: remove dead code from flagged_taskfile() flagged_taskfile() is called from execute_drive_cmd() (the only user) only if args->tf_out_flags.all != 0. 
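The dead-code reasoning is easy to check with a standalone sketch; the names below are illustrative stand-ins for the driver's functions, not the driver code itself.

struct tf_flags { unsigned int all; };

/* Mirrors flagged_taskfile(): callers guarantee f->all != 0. */
static int flagged_path(struct tf_flags *f)
{
	if (f->all == 0)
		return -1;	/* unreachable: the removed dead code */
	return 0;
}

/* Mirrors execute_drive_cmd(), the only caller. */
static int dispatch(struct tf_flags *f)
{
	return f->all != 0 ? flagged_path(f) : 0;
}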
Signed-off-by: Bartlomiej Zolnierkiewicz --- include/linux/hdreg.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index b5d660089de..2b54eac738e 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h @@ -80,10 +80,12 @@ /* * Define standard taskfile in/out register */ -#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE #define IDE_TASKFILE_STD_IN_FLAGS 0xFE -#define IDE_HOB_STD_OUT_FLAGS 0x3C #define IDE_HOB_STD_IN_FLAGS 0x3C +#ifndef __KERNEL__ +#define IDE_TASKFILE_STD_OUT_FLAGS 0xFE +#define IDE_HOB_STD_OUT_FLAGS 0x3C +#endif typedef unsigned char task_ioreg_t; typedef unsigned long sata_ioreg_t; -- cgit v1.2.3 From 29506415a0ff0152cc2928f8fcac724fbbf98651 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 20 Nov 2005 00:51:22 -0500 Subject: Input: uinput - convert to dynalloc allocation Also introduce proper locking when creating/deleting the device. Signed-off-by: Dmitry Torokhov --- include/linux/uinput.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/linux/uinput.h b/include/linux/uinput.h index 84876077027..6fd1a47acab 100644 --- a/include/linux/uinput.h +++ b/include/linux/uinput.h @@ -34,8 +34,7 @@ #define UINPUT_BUFFER_SIZE 16 #define UINPUT_NUM_REQUESTS 16 -/* state flags => bit index for {set|clear|test}_bit ops */ -#define UIST_CREATED 0 +enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED }; struct uinput_request { int id; @@ -52,11 +51,12 @@ struct uinput_request { struct uinput_device { struct input_dev *dev; - unsigned long state; + struct semaphore sem; + enum uinput_state state; wait_queue_head_t waitq; - unsigned char ready, - head, - tail; + unsigned char ready; + unsigned char head; + unsigned char tail; struct input_event buff[UINPUT_BUFFER_SIZE]; struct uinput_request *requests[UINPUT_NUM_REQUESTS]; -- cgit v1.2.3 From 59c7c0377e00a3cbd7b71631177fb92166ceb437 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 20 Nov 2005 00:51:33 -0500 Subject: Input: uinput - add UI_SET_SWBIT ioctl Signed-off-by: Dmitry Torokhov --- include/linux/uinput.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/uinput.h b/include/linux/uinput.h index 6fd1a47acab..0ff7ca68e5c 100644 --- a/include/linux/uinput.h +++ b/include/linux/uinput.h @@ -91,6 +91,7 @@ struct uinput_ff_erase { #define UI_SET_SNDBIT _IOW(UINPUT_IOCTL_BASE, 106, int) #define UI_SET_FFBIT _IOW(UINPUT_IOCTL_BASE, 107, int) #define UI_SET_PHYS _IOW(UINPUT_IOCTL_BASE, 108, char*) +#define UI_SET_SWBIT _IOW(UINPUT_IOCTL_BASE, 109, int) #define UI_BEGIN_FF_UPLOAD _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload) #define UI_END_FF_UPLOAD _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload) -- cgit v1.2.3 From e6c667592e824c2871fe0ae3bc4b9bc7e81941f4 Mon Sep 17 00:00:00 2001 From: "Jacob.Shin@amd.com" Date: Sun, 20 Nov 2005 18:49:07 +0100 Subject: [PATCH] Fix x86_64/msr.h interface to agree with i386/msr.h Ever since we removed msr.c from the x86_64 branch and started grabbing it from i386, the msr device (read functionality) has been broken for us. This is due to the differences between the asm-i386/msr.h and asm-x86_64/msr.h interfaces. Here is a patch to our side to fix this. Thankfully, as of the current (2.6.15-rc1-git6) tree, arch/i386/kernel/msr.c is the only file that uses the rdmsr_safe macro.
Signed-off-by: Jacob Shin Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds --- include/asm-x86_64/msr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 24dc39651bc..10f8b51cec8 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h @@ -56,7 +56,7 @@ ".section __ex_table,\"a\"\n" \ " .align 8\n" \ " .quad 1b,3b\n" \ - ".previous":"=&bDS" (ret__), "=a"(a), "=d"(b)\ + ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\ :"c"(msr), "i"(-EIO), "0"(0)); \ ret__; }) -- cgit v1.2.3
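The constraint change matters because rdmsr_safe() is called with pointer arguments: the caller passes &low and &high and expects the macro to store through them, which the old "=a" (a), "=d" (b) outputs could not do (they assigned to the local pointers themselves). A minimal caller sketch, modelled on the arch/i386/kernel/msr.c usage with error handling simplified:

#include <asm/msr.h>	/* rdmsr_safe() */

/* Sketch: read an MSR, tolerating a #GP fault on unimplemented MSRs. */
static int read_msr_checked(unsigned int msr, u32 *lo, u32 *hi)
{
	int err = rdmsr_safe(msr, lo, hi);	/* -EIO if the rdmsr faults */

	if (err)
		return err;	/* MSR not readable on this CPU */
	return 0;
}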