From 99ddef9bfe714c3273e3fce4c6b6a2a99e7d0bf8 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sat, 17 Feb 2007 18:17:16 -0700 Subject: [POWERPC] Fix compile error in prom.h In file included from include/asm/pci.h:20, from include/linux/pci.h:751, from arch/powerpc/sysdev/dart_iommu.c:36: include/asm/prom.h: In function `of_irq_to_resource': include/asm/prom.h:341: warning: implicit declaration of function `irq_of_parse_and_map' include/asm/prom.h:345: error: `NO_IRQ' undeclared (first use in this function) include/asm/prom.h:345: error: (Each undeclared identifier is reported only once include/asm/prom.h:345: error: for each function it appears in.) Seems that prom.h has always wanted irq.h. Cc: Mathieu Desnoyers Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 020ed015a94..1aa15e2e056 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -18,6 +18,7 @@ #include #include #include +#include #include /* Definitions used by the flattened device tree */ -- cgit v1.2.3 From 02567c6cdad4d6254052f25f3b93aa2771f48d25 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 21 Feb 2007 14:53:50 +1100 Subject: [POWERPC] Allocate syscall number for sys_getcpu I forgot to do this when wiring up the syscall. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/unistd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h index 0ae954e3d25..bcc21bf56a1 100644 --- a/include/asm-powerpc/unistd.h +++ b/include/asm-powerpc/unistd.h @@ -324,10 +324,11 @@ #define __NR_get_robust_list 299 #define __NR_set_robust_list 300 #define __NR_move_pages 301 +#define __NR_getcpu 302 #ifdef __KERNEL__ -#define __NR_syscalls 302 +#define __NR_syscalls 303 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls -- cgit v1.2.3 From dbc11f539df7c9a32424b78afb0314c68d5e7d0b Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Sun, 25 Feb 2007 20:04:18 +0100 Subject: [POWERPC] Include stddef.h in asm-powerpc/current.h to get offsetof On Tue, Oct 31, Hugh Dickins wrote: > +++ linux/include/asm-powerpc/current.h 2006-10-30 19:27:05.000000000 +0000 > +static inline struct task_struct *get_current(void) > +{ > + struct task_struct *task; > + > + __asm__ __volatile__("ld %0,%1(13)" > + : "=r" (task) > + : "i" (offsetof(struct paca_struct, __current))); This breaks compile of 2.6.18.8: CC [M] drivers/media/video/pwc/pwc-uncompress.o In file included from /home/olaf/kernel/linux-2.6.18.8/drivers/media/video/pwc/pwc-uncompress.c:29: include2/asm/current.h: In function 'get_current': include2/asm/current.h:23: warning: implicit declaration of function 'offsetof' include2/asm/current.h:23: error: expected expression before 'struct' make[5]: *** [drivers/media/video/pwc/pwc-uncompress.o] Error 1 Signed-off-by: Olaf Hering Signed-off-by: Paul Mackerras --- include/asm-powerpc/current.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h index b8708aedf92..e2c7f06931e 100644 --- a/include/asm-powerpc/current.h +++ b/include/asm-powerpc/current.h @@ -12,6 +12,7 @@ struct task_struct; #ifdef __powerpc64__ +#include #include static inline struct task_struct 
*get_current(void) -- cgit v1.2.3 From 723ec731de880a76a004a304b62bf8d0f96435d8 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 2 Mar 2007 13:36:21 -0700 Subject: [POWERPC] EDAC ECC software scrubber Implements the per arch atomic_scrub() that EDAC uses for software ECC scrubbing. It reads memory and then writes back the original value, allowing the hardware to detect and correct memory errors. Signed-off-by: Dave Jiang Signed-off-by: Paul Mackerras --- include/asm-powerpc/edac.h | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 include/asm-powerpc/edac.h (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/edac.h b/include/asm-powerpc/edac.h new file mode 100644 index 00000000000..6ead88bbfbb --- /dev/null +++ b/include/asm-powerpc/edac.h @@ -0,0 +1,40 @@ +/* + * PPC EDAC common defs + * + * Author: Dave Jiang + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ +#ifndef ASM_EDAC_H +#define ASM_EDAC_H +/* + * ECC atomic, DMA, SMP and interrupt safe scrub function. + * Implements the per arch atomic_scrub() that EDAC use for software + * ECC scrubbing. It reads memory and then writes back the original + * value, allowing the hardware to detect and correct memory errors. + */ +static __inline__ void atomic_scrub(void *va, u32 size) +{ + unsigned int *virt_addr = va; + unsigned int temp; + unsigned int i; + + for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) { + /* Very carefully read and write to memory atomically + * so we are interrupt, DMA and SMP safe. + */ + __asm__ __volatile__ ("\n\ + 1: lwarx %0,0,%1\n\ + stwcx. %0,0,%1\n\ + bne- 1b\n\ + isync" + : "=&r"(temp) + : "r"(virt_addr) + : "cr0", "memory"); + } +} + +#endif -- cgit v1.2.3 From 9874777016e06ad2df420237963e81389776cb6d Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 4 Mar 2007 16:58:39 +1100 Subject: [POWERPC] Create and use set_pci_dma_ops This will allow us to build without PCI easier. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/pci.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h index ac656ee6bb1..ebf31f1c591 100644 --- a/include/asm-powerpc/pci.h +++ b/include/asm-powerpc/pci.h @@ -70,8 +70,11 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) */ #define PCI_DISABLE_MWI +#ifdef CONFIG_PCI extern struct dma_mapping_ops *pci_dma_ops; +extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops); + /* For DAC DMA, we currently don't support it by default, but * we let 64-bit platforms override this. 
*/ @@ -82,7 +85,6 @@ static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) return 0; } -#ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, unsigned long *strategy_parameter) @@ -99,6 +101,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, *strat = PCI_DMA_BURST_MULTIPLE; *strategy_parameter = cacheline_size; } +#else /* CONFIG_PCI */ +#define set_pci_dma_ops(d) #endif extern int pci_domain_nr(struct pci_bus *bus); -- cgit v1.2.3 From 57190708f1f52d732d94fa21a8e576302d384d33 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 4 Mar 2007 17:02:41 +1100 Subject: [POWERPC] Create and use get_pci_dma_ops() This allows us to hide pci_dma_ops. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/pci.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h index ebf31f1c591..ce0f13e8eb1 100644 --- a/include/asm-powerpc/pci.h +++ b/include/asm-powerpc/pci.h @@ -71,17 +71,18 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) #define PCI_DISABLE_MWI #ifdef CONFIG_PCI -extern struct dma_mapping_ops *pci_dma_ops; - extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops); +extern struct dma_mapping_ops *get_pci_dma_ops(void); /* For DAC DMA, we currently don't support it by default, but * we let 64-bit platforms override this. */ static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask) { - if (pci_dma_ops && pci_dma_ops->dac_dma_supported) - return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask); + struct dma_mapping_ops *d = get_pci_dma_ops(); + + if (d && d->dac_dma_supported) + return d->dac_dma_supported(&hwdev->dev, mask); return 0; } @@ -103,6 +104,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) +#define get_pci_dma_ops() NULL #endif extern int pci_domain_nr(struct pci_bus *bus); -- cgit v1.2.3 From bed59275810a55500e885cbdc5c2a0507f13c00e Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 4 Mar 2007 17:04:44 +1100 Subject: [POWERPC] Allow pSeries to build without CONFIG_PCI Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/ppc-pci.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index ab6eddb518c..4a053404bf6 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h @@ -10,6 +10,8 @@ #define _ASM_POWERPC_PPC_PCI_H #ifdef __KERNEL__ +#ifdef CONFIG_PCI + #include #include @@ -126,5 +128,10 @@ struct device_node * find_device_pe(struct device_node *dn); #endif +#else /* CONFIG_PCI */ +static inline void find_and_init_phbs(void) { } +static inline void init_pci_config_tokens(void) { } +#endif /* !CONFIG_PCI */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PPC_PCI_H */ -- cgit v1.2.3 From 36241ce695f16193d0f76ea010f212119da37071 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 4 Mar 2007 17:07:38 +1100 Subject: [POWERPC] Make find_and_init_pbs() a void function It always returned 0 and noone checked. 
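For context on the two accessor patches above: once pci_dma_ops is hidden behind accessors, platform code selects its DMA operations with set_pci_dma_ops() and generic code reads them back with get_pci_dma_ops(); the !CONFIG_PCI stubs make both collapse to nothing. A rough, illustrative sketch only -- example_pci_init() and example_iommu_dma_ops are made-up names, not code from the tree:

	/* Platform setup (sketch): pick the DMA ops used for PCI devices. */
	static void __init example_pci_init(void)		/* hypothetical */
	{
		set_pci_dma_ops(&example_iommu_dma_ops);	/* assumed ops struct */
	}

	/* Consumer (sketch), mirroring the pci_dac_dma_supported() hunk above;
	 * it still compiles with CONFIG_PCI=n because get_pci_dma_ops() then
	 * expands to NULL. */
	struct dma_mapping_ops *ops = get_pci_dma_ops();

	if (ops && ops->dac_dma_supported)
		return ops->dac_dma_supported(&hwdev->dev, mask);
	return 0;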
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/ppc-pci.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index 4a053404bf6..f186720d073 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h @@ -24,7 +24,7 @@ extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary); extern struct list_head hose_list; extern int global_phb_number; -extern unsigned long find_and_init_phbs(void); +extern void find_and_init_phbs(void); extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */ -- cgit v1.2.3 From a83088003cd53f3cd8d550ab5d7778866568d204 Mon Sep 17 00:00:00 2001 From: Joachim Fenkes Date: Fri, 9 Mar 2007 18:56:46 +0100 Subject: [POWERPC] ibmebus: whitespace fixes This fixes a lot of whitespace in ibmebus.[ch] Signed-off-by: Joachim Fenkes Signed-off-by: Paul Mackerras --- include/asm-powerpc/ibmebus.h | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h index 66112114b8c..8d3c9e39b90 100644 --- a/include/asm-powerpc/ibmebus.h +++ b/include/asm-powerpc/ibmebus.h @@ -3,35 +3,35 @@ * * Copyright (c) 2005 IBM Corporation * Heiko J Schick - * + * * All rights reserved. * - * This source code is distributed under a dual license of GPL v2.0 and OpenIB - * BSD. + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. * * OpenIB BSD License * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials - * provided with the distribution. + * provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER - * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ @@ -46,12 +46,12 @@ extern struct bus_type ibmebus_bus_type; -struct ibmebus_dev { +struct ibmebus_dev { const char *name; struct of_device ofdev; }; -struct ibmebus_driver { +struct ibmebus_driver { char *name; struct of_device_id *id_table; int (*probe) (struct ibmebus_dev *dev, const struct of_device_id *id); @@ -63,7 +63,7 @@ int ibmebus_register_driver(struct ibmebus_driver *drv); void ibmebus_unregister_driver(struct ibmebus_driver *drv); int ibmebus_request_irq(struct ibmebus_dev *dev, - u32 ist, + u32 ist, irq_handler_t handler, unsigned long irq_flags, const char * devname, void *dev_id); -- cgit v1.2.3 From 6bccf755ff53241d46c01c229b3c2452b9029ec4 Mon Sep 17 00:00:00 2001 From: Joachim Fenkes Date: Fri, 9 Mar 2007 19:00:32 +0100 Subject: [POWERPC] ibmebus: dynamic addition/removal of adapters, some code cleanup This adds two sysfs attributes to /sys/bus/ibmebus which can be used to notify the ebus driver of added / removed ebus devices in the OF device tree. Echoing the device's location code (as found in the OFDT "ibm,loc-code" property) into the "probe" attribute will notify ebus of addition of the device and cause the appropriate device driver's probe function to be called on the device. Likewise, echoing the location code into the "remove" attribute will cause the device to be removed from the system. The writes will block until the respective operation has finished and return an error code if the operation failed. In addition, two minor tidbits are fixed: - The fake root device used to provide a common parent for all ebus devices is now based on device instead of of_device - it had no associated devtree node. This saves several checks throughout the ebus driver. - The sysfs attributes are now generated automagically by device_register() instead of by the ibmebus code, which saves a few compiler warnings about unused return codes. Signed-off-by: Joachim Fenkes Signed-off-by: Paul Mackerras --- include/asm-powerpc/ibmebus.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h index 8d3c9e39b90..87d396e28db 100644 --- a/include/asm-powerpc/ibmebus.h +++ b/include/asm-powerpc/ibmebus.h @@ -2,6 +2,7 @@ * IBM PowerPC eBus Infrastructure Support. * * Copyright (c) 2005 IBM Corporation + * Joachim Fenkes * Heiko J Schick * * All rights reserved. @@ -47,7 +48,6 @@ extern struct bus_type ibmebus_bus_type; struct ibmebus_dev { - const char *name; struct of_device ofdev; }; -- cgit v1.2.3 From 0e0293c898c424c52e5d4e7f6923a203d06b9c4b Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 14 Mar 2007 11:50:40 +1100 Subject: [POWERPC] Update documentation for flat device tree format v17 This patch updates booting-without-of.txt to describe version 17 of the flattened device tree format. 
Version 17 is a small, backwards compatible change from version 16, adding an extra field giving the size of the device tree's structure block. At this time, the kernel has no use for the extra information, however its presence can make life easier for bootloaders or other software manipulating the tree. In addition this patch adds information on the size_dt_strings field of the device tree header, present since version 3 of the flattened tree format, but omitted from the documentation. It also makes changes to consistently refer to versions 16 and 17 as versions 16 and 17 in decimal, rather than version 0x10 which was occasionally used for version 16 previously. Finally, we also add the new field to the definition of the device tree header structure in prom.h Signed-off-by: David Gibson Acked-by: Jon Loeliger Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 1aa15e2e056..994de8ea330 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -59,6 +59,8 @@ struct boot_param_header u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ /* version 3 fields below */ u32 dt_strings_size; /* size of the DT strings block */ + /* version 17 fields below */ + u32 dt_struct_size; /* size of the DT structure block */ }; -- cgit v1.2.3 From e91948fd84086020072e022d5463036033d449c1 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 16 Mar 2007 17:47:07 +1100 Subject: [POWERPC] Minor paca optimisation Move the slb_shadow_ptr field into the first cache line since it is (like everything there) read-only after boot. It is in fact statically initialised and thereafter only read. Signed-off-by: Stephen Rothwell Acked-by: Michael Neuling Signed-off-by: Paul Mackerras --- include/asm-powerpc/paca.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h index 0d3adc09c84..4de851d91f9 100644 --- a/include/asm-powerpc/paca.h +++ b/include/asm-powerpc/paca.h @@ -70,6 +70,7 @@ struct paca_struct { s16 hw_cpu_id; /* Physical processor number */ u8 cpu_start; /* At startup, processor spins until */ /* this becomes non-zero. */ + struct slb_shadow *slb_shadow_ptr; /* * Now, starting in cacheline 2, the exception save areas @@ -101,8 +102,6 @@ struct paca_struct { u64 user_time; /* accumulated usermode TB ticks */ u64 system_time; /* accumulated system TB ticks */ u64 startpurr; /* PURR/TB value snapshot */ - - struct slb_shadow *slb_shadow_ptr; }; extern struct paca_struct paca[]; -- cgit v1.2.3 From 9c547768e7d9f456f1b145102e75f79e30f7b709 Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Mon, 19 Mar 2007 14:58:07 -0500 Subject: [POWERPC] EEH: wait for slot status Modify routine that returns PCI slot status to wait for slot status to become available. This is needed, as slots that are in some remote card cage may go offline for extended periods of time. New users for this routine in following patches. 
Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- include/asm-powerpc/ppc-pci.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h index f186720d073..d74b2965bb8 100644 --- a/include/asm-powerpc/ppc-pci.h +++ b/include/asm-powerpc/ppc-pci.h @@ -70,7 +70,7 @@ struct pci_dev *pci_get_device_by_addr(unsigned long addr); void eeh_slot_error_detail (struct pci_dn *pdn, int severity); /** - * rtas_pci_enableo - enable IO transfers for this slot + * rtas_pci_enable - enable IO transfers for this slot * @pdn: pci device node * @function: either EEH_THAW_MMIO or EEH_THAW_DMA * @@ -91,6 +91,7 @@ int rtas_pci_enable(struct pci_dn *pdn, int function); * Returns a non-zero value if the reset failed. */ int rtas_set_slot_reset (struct pci_dn *); +int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs); /** * eeh_restore_bars - Restore device configuration info. -- cgit v1.2.3 From d0ab95ca9854174029cef2f08acf1859441cb547 Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Mon, 19 Mar 2007 14:59:10 -0500 Subject: [POWERPC] EEH: rm un-needed data The EEH event notification system passes around data that is not needed or at least, not used properly. Stop passing this data; get it in a more reliable fashion. Signed-off-by: Linas Vepstas Signed-off-by: Paul Mackerras --- include/asm-powerpc/eeh_event.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h index dc6bf0ffb79..cc3cb04539a 100644 --- a/include/asm-powerpc/eeh_event.h +++ b/include/asm-powerpc/eeh_event.h @@ -30,8 +30,6 @@ struct eeh_event { struct list_head list; struct device_node *dn; /* struct device node */ struct pci_dev *dev; /* affected device */ - enum pci_channel_state state; /* PCI bus state for the affected device */ - int time_unavail; /* milliseconds until device might be available */ }; /** @@ -46,9 +44,7 @@ struct eeh_event { * (from a workqueue). */ int eeh_send_failure_event (struct device_node *dn, - struct pci_dev *dev, - enum pci_channel_state state, - int time_unavail); + struct pci_dev *dev); /* Main recovery function */ struct pci_dn * handle_eeh_events (struct eeh_event *); -- cgit v1.2.3 From 4002aca771a2aa2848e94a98cf51a2cae4e77ae0 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 20 Mar 2007 10:08:33 -0500 Subject: [POWERPC] Remove last_syscall Remove last_syscall from 32bit powerpc, its been gone in 64bit for years. 
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- include/asm-powerpc/processor.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h index a26c32ee552..d947b160949 100644 --- a/include/asm-powerpc/processor.h +++ b/include/asm-powerpc/processor.h @@ -133,7 +133,6 @@ struct thread_struct { mm_segment_t fs; /* for get_fs() validation */ #ifdef CONFIG_PPC32 void *pgdir; /* root of page-table tree */ - signed long last_syscall; #endif #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE) unsigned long dbcr0; /* debug control register values */ -- cgit v1.2.3 From 9eb90a0c3b333e27db74412833a36da3f27da6a3 Mon Sep 17 00:00:00 2001 From: Zang Roy-r61911 Date: Fri, 9 Mar 2007 13:27:28 +0800 Subject: [POWERPC] 86xx/85xx: Unify Freescale PCI Express memory map registers structure Unify PCI Express memory map registers structure define to arch/pwoerpc/sysdev/fsl_pcie.h for Freescale 85xx/86xx processor family. Signed-off-by: Roy Zang Signed-off-by: Kumar Gala --- include/asm-powerpc/immap_86xx.h | 75 ---------------------------------------- 1 file changed, 75 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/immap_86xx.h b/include/asm-powerpc/immap_86xx.h index d905b662226..59b9e07b8e9 100644 --- a/include/asm-powerpc/immap_86xx.h +++ b/include/asm-powerpc/immap_86xx.h @@ -85,81 +85,6 @@ typedef struct ccsr_pci { char res19[472]; } ccsr_pci_t; -/* PCI Express Registers */ -typedef struct ccsr_pex { - uint pex_config_addr; /* 0x.000 - PCI Express Configuration Address Register */ - uint pex_config_data; /* 0x.004 - PCI Express Configuration Data Register */ - char res1[4]; - uint pex_otb_cpl_tor; /* 0x.00c - PCI Express Outbound completion timeout register */ - uint pex_conf_tor; /* 0x.010 - PCI Express configuration timeout register */ - char res2[12]; - uint pex_pme_mes_dr; /* 0x.020 - PCI Express PME and message detect register */ - uint pex_pme_mes_disr; /* 0x.024 - PCI Express PME and message disable register */ - uint pex_pme_mes_ier; /* 0x.028 - PCI Express PME and message interrupt enable register */ - uint pex_pmcr; /* 0x.02c - PCI Express power management command register */ - char res3[3024]; - uint pexotar0; /* 0x.c00 - PCI Express outbound translation address register 0 */ - uint pexotear0; /* 0x.c04 - PCI Express outbound translation extended address register 0*/ - char res4[8]; - uint pexowar0; /* 0x.c10 - PCI Express outbound window attributes register 0*/ - char res5[12]; - uint pexotar1; /* 0x.c20 - PCI Express outbound translation address register 1 */ - uint pexotear1; /* 0x.c24 - PCI Express outbound translation extended address register 1*/ - uint pexowbar1; /* 0x.c28 - PCI Express outbound window base address register 1*/ - char res6[4]; - uint pexowar1; /* 0x.c30 - PCI Express outbound window attributes register 1*/ - char res7[12]; - uint pexotar2; /* 0x.c40 - PCI Express outbound translation address register 2 */ - uint pexotear2; /* 0x.c44 - PCI Express outbound translation extended address register 2*/ - uint pexowbar2; /* 0x.c48 - PCI Express outbound window base address register 2*/ - char res8[4]; - uint pexowar2; /* 0x.c50 - PCI Express outbound window attributes register 2*/ - char res9[12]; - uint pexotar3; /* 0x.c60 - PCI Express outbound translation address register 3 */ - uint pexotear3; /* 0x.c64 - PCI Express outbound translation extended address register 3*/ - uint pexowbar3; /* 0x.c68 - PCI Express outbound window base 
address register 3*/ - char res10[4]; - uint pexowar3; /* 0x.c70 - PCI Express outbound window attributes register 3*/ - char res11[12]; - uint pexotar4; /* 0x.c80 - PCI Express outbound translation address register 4 */ - uint pexotear4; /* 0x.c84 - PCI Express outbound translation extended address register 4*/ - uint pexowbar4; /* 0x.c88 - PCI Express outbound window base address register 4*/ - char res12[4]; - uint pexowar4; /* 0x.c90 - PCI Express outbound window attributes register 4*/ - char res13[12]; - char res14[256]; - uint pexitar3; /* 0x.da0 - PCI Express inbound translation address register 3 */ - char res15[4]; - uint pexiwbar3; /* 0x.da8 - PCI Express inbound window base address register 3 */ - uint pexiwbear3; /* 0x.dac - PCI Express inbound window base extended address register 3 */ - uint pexiwar3; /* 0x.db0 - PCI Express inbound window attributes register 3 */ - char res16[12]; - uint pexitar2; /* 0x.dc0 - PCI Express inbound translation address register 2 */ - char res17[4]; - uint pexiwbar2; /* 0x.dc8 - PCI Express inbound window base address register 2 */ - uint pexiwbear2; /* 0x.dcc - PCI Express inbound window base extended address register 2 */ - uint pexiwar2; /* 0x.dd0 - PCI Express inbound window attributes register 2 */ - char res18[12]; - uint pexitar1; /* 0x.de0 - PCI Express inbound translation address register 2 */ - char res19[4]; - uint pexiwbar1; /* 0x.de8 - PCI Express inbound window base address register 2 */ - uint pexiwbear1; /* 0x.dec - PCI Express inbound window base extended address register 2 */ - uint pexiwar1; /* 0x.df0 - PCI Express inbound window attributes register 2 */ - char res20[12]; - uint pex_err_dr; /* 0x.e00 - PCI Express error detect register */ - char res21[4]; - uint pex_err_en; /* 0x.e08 - PCI Express error interrupt enable register */ - char res22[4]; - uint pex_err_disr; /* 0x.e10 - PCI Express error disable register */ - char res23[12]; - uint pex_err_cap_stat; /* 0x.e20 - PCI Express error capture status register */ - char res24[4]; - uint pex_err_cap_r0; /* 0x.e28 - PCI Express error capture register 0 */ - uint pex_err_cap_r1; /* 0x.e2c - PCI Express error capture register 0 */ - uint pex_err_cap_r2; /* 0x.e30 - PCI Express error capture register 0 */ - uint pex_err_cap_r3; /* 0x.e34 - PCI Express error capture register 0 */ -} ccsr_pex_t; - /* Global Utility Registers */ typedef struct ccsr_guts { uint porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ -- cgit v1.2.3 From eb0cb8a07e320ed3237789cc4f29858338d14d8e Mon Sep 17 00:00:00 2001 From: Sylvain Munaut Date: Mon, 12 Feb 2007 23:13:25 +0100 Subject: [POWERPC] Add a unified uevent handler for bus based on of_device This common uevent handler allow the several bus types based on of_device to generate the uevent properly and avoiding code duplication. This handlers take a struct device as argument and can therefore be used as the uevent call directly if no special treatment is needed for the bus. 
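In other words, a bus whose devices are of_devices can point its uevent hook straight at the common handler. A minimal sketch (the bus name and match function are invented for illustration):

	static struct bus_type example_of_bus_type = {	/* hypothetical bus */
		.name	= "example-of-bus",
		.match	= example_of_bus_match,		/* assumed to exist elsewhere */
		.uevent	= of_device_uevent,		/* common handler added below */
	};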
Signed-off-by: Sylvain Munaut Signed-off-by: Paul Mackerras --- include/asm-powerpc/of_device.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h index a889b2005bf..4f1aabe0ce7 100644 --- a/include/asm-powerpc/of_device.h +++ b/include/asm-powerpc/of_device.h @@ -32,5 +32,8 @@ extern int of_device_register(struct of_device *ofdev); extern void of_device_unregister(struct of_device *ofdev); extern void of_release_dev(struct device *dev); +extern int of_device_uevent(struct device *dev, + char **envp, int num_envp, char *buffer, int buffer_size); + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_OF_DEVICE_H */ -- cgit v1.2.3 From 3467bfd340f9ad48f3732415533a2e9c18240b62 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Thu, 22 Mar 2007 09:34:13 -0500 Subject: [POWERPC] Use mtocrf instruction in asm when CONFIG_POWER4_ONLY=y mtocrf is a faster single-field mtcrf (move to condition register fields) instruction available in POWER4 and later processors. It can make quite a difference in performance on some implementations, so use it for CONFIG_POWER4_ONLY builds. Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- include/asm-powerpc/asm-compat.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h index c89bd58ee28..c19e7367fce 100644 --- a/include/asm-powerpc/asm-compat.h +++ b/include/asm-powerpc/asm-compat.h @@ -78,6 +78,15 @@ #define PPC_STLCX stringify_in_c(stdcx.) #define PPC_CNTLZL stringify_in_c(cntlzd) +/* Move to CR, single-entry optimized version. Only available + * on POWER4 and later. + */ +#ifdef CONFIG_POWER4_ONLY +#define PPC_MTOCRF stringify_in_c(mtocrf) +#else +#define PPC_MTOCRF stringify_in_c(mtcrf) +#endif + #else /* 32-bit */ /* operations for longs and pointers */ @@ -89,6 +98,7 @@ #define PPC_LLARX stringify_in_c(lwarx) #define PPC_STLCX stringify_in_c(stwcx.) #define PPC_CNTLZL stringify_in_c(cntlzw) +#define PPC_MTOCRF stringify_in_c(mtcrf) #endif -- cgit v1.2.3 From 0e56efc7dcd1eb5004363e52bdbe801783245638 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 3 Apr 2007 10:54:01 +1000 Subject: [POWERPC] Rename get_property to of_get_property This is more consistent and gets us closer to the Sparc code. We add a get_property define for compatibility during the change over. 
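The effect on callers is purely a spelling change; the compatibility define keeps the old name building while the tree is converted. Roughly:

	const u32 *prop;
	int len;

	/* Old spelling, still accepted via the compatibility define: */
	prop = get_property(np, "reg", &len);
	/* New spelling, as used by the parport.h conversion further down: */
	prop = of_get_property(np, "reg", &len);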
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 994de8ea330..448c5ce76fb 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -165,9 +165,10 @@ extern void early_init_devtree(void *); extern int device_is_compatible(const struct device_node *device, const char *); extern int machine_is_compatible(const char *compat); -extern const void *get_property(const struct device_node *node, +extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); +#define get_property(a, b, c) of_get_property((a), (b), (c)) extern void print_properties(struct device_node *node); extern int prom_n_addr_cells(struct device_node* np); extern int prom_n_size_cells(struct device_node* np); -- cgit v1.2.3 From 7a92f74f98bde8498c98aad6cac5da5a87dd0bf4 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 3 Apr 2007 10:55:39 +1000 Subject: [POWERPC] Rename device_is_compatible to of_device_is_compatible This is more consistent and gets us closer to the Sparc code. We add a device_is_compatible define for compatibility during the change over. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 448c5ce76fb..fcacc88b770 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -162,8 +162,9 @@ extern void of_detach_node(const struct device_node *); extern void finish_device_tree(void); extern void unflatten_device_tree(void); extern void early_init_devtree(void *); -extern int device_is_compatible(const struct device_node *device, +extern int of_device_is_compatible(const struct device_node *device, const char *); +#define device_is_compatible(d, c) of_device_is_compatible((d), (c)) extern int machine_is_compatible(const char *compat); extern const void *of_get_property(const struct device_node *node, const char *name, -- cgit v1.2.3 From a8bda5dd4f99d6469f3c0dc362db3cce8a4d6416 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 3 Apr 2007 10:56:50 +1000 Subject: [POWERPC] Rename prom_n_addr_cells to of_n_addr_cells This is more consistent and gets us closer to the Sparc code. 
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index fcacc88b770..e0c92bf2019 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -171,7 +171,7 @@ extern const void *of_get_property(const struct device_node *node, int *lenp); #define get_property(a, b, c) of_get_property((a), (b), (c)) extern void print_properties(struct device_node *node); -extern int prom_n_addr_cells(struct device_node* np); +extern int of_n_addr_cells(struct device_node* np); extern int prom_n_size_cells(struct device_node* np); extern int prom_n_intr_cells(struct device_node* np); extern void prom_get_irq_senses(unsigned char *senses, int off, int max); -- cgit v1.2.3 From 9213feea6e197f8507ec855337798cc3388f5570 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 3 Apr 2007 10:57:48 +1000 Subject: [POWERPC] Rename prom_n_size_cells to of_n_size_cells This is more consistent and gets us closer to the Sparc code. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index e0c92bf2019..fb068f2eca4 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -172,7 +172,7 @@ extern const void *of_get_property(const struct device_node *node, #define get_property(a, b, c) of_get_property((a), (b), (c)) extern void print_properties(struct device_node *node); extern int of_n_addr_cells(struct device_node* np); -extern int prom_n_size_cells(struct device_node* np); +extern int of_n_size_cells(struct device_node* np); extern int prom_n_intr_cells(struct device_node* np); extern void prom_get_irq_senses(unsigned char *senses, int off, int max); extern int prom_add_property(struct device_node* np, struct property* prop); -- cgit v1.2.3 From 1a38147ed0737a9c01dbf5f2ca47fd2a0aa5cb55 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 3 Apr 2007 10:58:52 +1000 Subject: [POWERPC] Make struct property's value a void * Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index fb068f2eca4..e73a2b482a1 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -71,7 +71,7 @@ typedef u32 ihandle; struct property { char *name; int length; - unsigned char *value; + void *value; struct property *next; }; -- cgit v1.2.3 From 721151d004dcf01a71b12bb6b893f9160284cf6e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 3 Apr 2007 21:24:02 +1000 Subject: [POWERPC] Allow drivers to map individual 4k pages to userspace Some drivers have resources that they want to be able to map into userspace that are 4k in size. On a kernel configured with 64k pages we currently end up mapping the 4k we want plus another 60k of physical address space, which could contain anything. This can introduce security problems, for example in the case of an infiniband adaptor where the other 60k could contain registers that some other program is using for its communications. 
This patch adds a new function, remap_4k_pfn, which drivers can use to map a single 4k page to userspace regardless of whether the kernel is using a 4k or a 64k page size. Like remap_pfn_range, it would typically be called in a driver's mmap function. It only maps a single 4k page, which on a 64k page kernel appears replicated 16 times throughout a 64k page. On a 4k page kernel it reduces to a call to remap_pfn_range. The way this works on a 64k kernel is that a new bit, _PAGE_4K_PFN, gets set on the linux PTE. This alters the way that __hash_page_4K computes the real address to put in the HPTE. The RPN field of the linux PTE becomes the 4k RPN directly rather than being interpreted as a 64k RPN. Since the RPN field is 32 bits, this means that physical addresses being mapped with remap_4k_pfn have to be below 2^44, i.e. 0x100000000000. The patch also factors out the code in arch/powerpc/mm/hash_utils_64.c that deals with demoting a process to use 4k pages into one function that gets called in the various different places where we need to do that. There were some discrepancies between exactly what was done in the various places, such as a call to spu_flush_all_slbs in one case but not in others. Signed-off-by: Paul Mackerras --- include/asm-powerpc/pgtable-4k.h | 3 +++ include/asm-powerpc/pgtable-64k.h | 5 +++++ 2 files changed, 8 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h index 345d9b07b3e..a28fa8bc01d 100644 --- a/include/asm-powerpc/pgtable-4k.h +++ b/include/asm-powerpc/pgtable-4k.h @@ -97,3 +97,6 @@ #define pud_ERROR(e) \ printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) + +#define remap_4k_pfn(vma, addr, pfn, prot) \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h index 4b7126c53f3..5e84f070eaf 100644 --- a/include/asm-powerpc/pgtable-64k.h +++ b/include/asm-powerpc/pgtable-64k.h @@ -35,6 +35,7 @@ #define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */ #define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */ #define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */ +#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */ #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ #define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */ @@ -93,6 +94,10 @@ #define pte_pagesize_index(pte) \ (((pte) & _PAGE_COMBO)? 
MMU_PAGE_4K: MMU_PAGE_64K) +#define remap_4k_pfn(vma, addr, pfn, prot) \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \ + __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)) + #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_PGTABLE_64K_H */ -- cgit v1.2.3 From ceef87782a9452eeeca774e65d7f4e06455780a3 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 3 Apr 2007 22:24:06 +1000 Subject: [POWERPC] Rename get_property to of_get_property: include Signed-off-by: Stephen Rothwell Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- include/asm-powerpc/parport.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h index 3fca21ddf54..b37b81e3727 100644 --- a/include/asm-powerpc/parport.h +++ b/include/asm-powerpc/parport.h @@ -20,18 +20,18 @@ extern struct parport *parport_pc_probe_port (unsigned long int base, static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) { struct device_node *np; - u32 *prop; + const u32 *prop; u32 io1, io2; int propsize; int count = 0; for (np = NULL; (np = of_find_compatible_node(np, "parallel", "pnpPNP,400")) != NULL;) { - prop = (u32 *)get_property(np, "reg", &propsize); + prop = of_get_property(np, "reg", &propsize); if (!prop || propsize > 6*sizeof(u32)) continue; io1 = prop[1]; io2 = prop[2]; - prop = (u32 *)get_property(np, "interrupts", NULL); + prop = of_get_property(np, "interrupts", NULL); if (!prop) continue; if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL) -- cgit v1.2.3 From e68c825bb016703eda94aac99be96de73b482d61 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 11 Apr 2007 16:13:19 +1000 Subject: [POWERPC] Add inatomic versions of __get_user and __put_user Those are needed by things like alignment exception fixup handlers since those can now be triggered by copy_tofrom_user_inatomic. 
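As a concrete, hedged illustration of the intended use: a fixup path that already runs with page faults disabled can fetch a user word without risking a sleep, along these lines (sketch only; everything beyond the regs->nip access is not taken from this patch):

	unsigned int instr;

	/* A plain __get_user() may sleep on a fault; the _inatomic
	 * variant added in the hunk below must not. */
	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return -EFAULT;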
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- include/asm-powerpc/uaccess.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h index adbf16b8cfb..8e798e3758b 100644 --- a/include/asm-powerpc/uaccess.h +++ b/include/asm-powerpc/uaccess.h @@ -110,12 +110,18 @@ struct exception_table_entry { __get_user_nocheck((x), (ptr), sizeof(*(ptr))) #define __put_user(x, ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + #ifndef __powerpc64__ #define __get_user64(x, ptr) \ __get_user64_nocheck((x), (ptr), sizeof(*(ptr))) #define __put_user64(x, ptr) __put_user(x, ptr) #endif +#define __get_user_inatomic(x, ptr) \ + __get_user_nosleep((x), (ptr), sizeof(*(ptr))) +#define __put_user_inatomic(x, ptr) \ + __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) + #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user @@ -198,6 +204,16 @@ do { \ __pu_err; \ }) +#define __put_user_nosleep(x, ptr, size) \ +({ \ + long __pu_err; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + __chk_user_ptr(ptr); \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ +}) + + extern long __get_user_bad(void); #define __get_user_asm(x, addr, err, op) \ @@ -297,6 +313,18 @@ do { \ __gu_err; \ }) +#define __get_user_nosleep(x, ptr, size) \ +({ \ + long __gu_err; \ + unsigned long __gu_val; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + __chk_user_ptr(ptr); \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ +}) + + /* more complex routines */ extern unsigned long __copy_tofrom_user(void __user *to, -- cgit v1.2.3 From a741e67969577163a4cfc78d7fd2753219087ef1 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 10 Apr 2007 17:09:37 +1000 Subject: [POWERPC] Make tlb flush batch use lazy MMU mode The current tlb flush code on powerpc 64 bits has a subtle race since we lost the page table lock due to the possible faulting in of new PTEs after a previous one has been removed but before the corresponding hash entry has been evicted, which can leads to all sort of fatal problems. This patch reworks the batch code completely. It doesn't use the mmu_gather stuff anymore. Instead, we use the lazy mmu hooks that were added by the paravirt code. They have the nice property that the enter/leave lazy mmu mode pair is always fully contained by the PTE lock for a given range of PTEs. Thus we can guarantee that all batches are flushed on a given CPU before it drops that lock. We also generalize batching for any PTE update that require a flush. Batching is now enabled on a CPU by arch_enter_lazy_mmu_mode() and disabled by arch_leave_lazy_mmu_mode(). The code epects that this is always contained within a PTE lock section so no preemption can happen and no PTE insertion in that range from another CPU. When batching is enabled on a CPU, every PTE updates that need a hash flush will use the batch for that flush. 
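The usage pattern this relies on is the one generic mm code already follows: the lazy-MMU pair brackets a run of PTE updates made under the PTE lock, so the batch is always drained before the lock is dropped. Schematically (a generic-code sketch, not part of this patch):

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();		/* batching on for this CPU */

	for (; addr < end; pte++, addr += PAGE_SIZE)
		ptep_set_wrprotect(mm, addr, pte);	/* each update may queue a hash flush */

	arch_leave_lazy_mmu_mode();		/* any pending batch is flushed here */
	pte_unmap_unlock(pte - 1, ptl);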
Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- include/asm-powerpc/pgtable.h | 50 ++++++++++++++---------------------------- include/asm-powerpc/tlb.h | 1 - include/asm-powerpc/tlbflush.h | 39 ++++++++++++++++++++------------ 3 files changed, 41 insertions(+), 49 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h index 10f52743f4f..c7142c7e0e0 100644 --- a/include/asm-powerpc/pgtable.h +++ b/include/asm-powerpc/pgtable.h @@ -272,7 +272,10 @@ static inline pte_t pte_mkhuge(pte_t pte) { return pte; } /* Atomic PTE updates */ -static inline unsigned long pte_update(pte_t *p, unsigned long clr) +static inline unsigned long pte_update(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, unsigned long clr, + int huge) { unsigned long old, tmp; @@ -283,20 +286,15 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr) andc %1,%0,%4 \n\ stdcx. %1,0,%3 \n\ bne- 1b" - : "=&r" (old), "=&r" (tmp), "=m" (*p) - : "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY) + : "=&r" (old), "=&r" (tmp), "=m" (*ptep) + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) : "cc" ); + + if (old & _PAGE_HASHPTE) + hpte_need_flush(mm, addr, ptep, old, huge); return old; } -/* PTE updating functions, this function puts the PTE in the - * batch, doesn't actually triggers the hash flush immediately, - * you need to call flush_tlb_pending() to do that. - * Pass -1 for "normal" size (4K or 64K) - */ -extern void hpte_update(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, unsigned long pte, int huge); - static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { @@ -304,11 +302,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pte_update(ptep, _PAGE_ACCESSED); - if (old & _PAGE_HASHPTE) { - hpte_update(mm, addr, ptep, old, 0); - flush_tlb_pending(); - } + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); return (old & _PAGE_ACCESSED) != 0; } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -331,9 +325,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) return 0; - old = pte_update(ptep, _PAGE_DIRTY); - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); + old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0); return (old & _PAGE_DIRTY) != 0; } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY @@ -352,9 +344,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - old = pte_update(ptep, _PAGE_RW); - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); + old = pte_update(mm, addr, ptep, _PAGE_RW, 0); } /* @@ -378,7 +368,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, ({ \ int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ __ptep); \ - flush_tlb_page(__vma, __address); \ __dirty; \ }) @@ -386,20 +375,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old = pte_update(ptep, ~0UL); - - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); return __pte(old); } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) { 
- unsigned long old = pte_update(ptep, ~0UL); - - if (old & _PAGE_HASHPTE) - hpte_update(mm, addr, ptep, old, 0); + pte_update(mm, addr, ptep, ~0UL, 0); } /* @@ -408,10 +391,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - if (pte_present(*ptep)) { + if (pte_present(*ptep)) pte_clear(mm, addr, ptep); - flush_tlb_pending(); - } pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); *ptep = pte; } @@ -522,6 +503,7 @@ void pgtable_cache_init(void); return pt; } + #include #endif /* __ASSEMBLY__ */ diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h index 4e2a834683f..0a17682663d 100644 --- a/include/asm-powerpc/tlb.h +++ b/include/asm-powerpc/tlb.h @@ -38,7 +38,6 @@ extern void pte_free_finish(void); static inline void tlb_flush(struct mmu_gather *tlb) { - flush_tlb_pending(); pte_free_finish(); } diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h index 93c7d0c7230..0bc5a5e506b 100644 --- a/include/asm-powerpc/tlbflush.h +++ b/include/asm-powerpc/tlbflush.h @@ -28,25 +28,41 @@ struct mm_struct; #define PPC64_TLB_BATCH_NR 192 struct ppc64_tlb_batch { - unsigned long index; - struct mm_struct *mm; - real_pte_t pte[PPC64_TLB_BATCH_NR]; - unsigned long vaddr[PPC64_TLB_BATCH_NR]; - unsigned int psize; + int active; + unsigned long index; + struct mm_struct *mm; + real_pte_t pte[PPC64_TLB_BATCH_NR]; + unsigned long vaddr[PPC64_TLB_BATCH_NR]; + unsigned int psize; }; DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch); -static inline void flush_tlb_pending(void) +extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pte, int huge); + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE + +static inline void arch_enter_lazy_mmu_mode(void) +{ + struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); + + batch->active = 1; +} + +static inline void arch_leave_lazy_mmu_mode(void) { - struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); + struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); if (batch->index) __flush_tlb_pending(batch); - put_cpu_var(ppc64_tlb_batch); + batch->active = 0; } +#define arch_flush_lazy_mmu_mode() do {} while (0) + + extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local); extern void flush_hash_range(unsigned long number, int local); @@ -88,15 +104,12 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); static inline void flush_tlb_mm(struct mm_struct *mm) { - flush_tlb_pending(); } static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { -#ifdef CONFIG_PPC64 - flush_tlb_pending(); -#else +#ifndef CONFIG_PPC64 _tlbie(vmaddr); #endif } @@ -112,13 +125,11 @@ static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - flush_tlb_pending(); } static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - flush_tlb_pending(); } #else /* 6xx, 7xx, 7xxx cpus */ -- cgit v1.2.3 From 88df6e90fa9782dbf44d936e44649afe271e4790 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 12 Apr 2007 15:30:22 +1000 Subject: [POWERPC] DEBUG_PAGEALLOC for 32-bit Here's an implementation of DEBUG_PAGEALLOC for ppc32. 
It disables BAT mapping and is only tested with Hash table based processor though it shouldn't be too hard to adapt it to others. Signed-off-by: Benjamin Herrenschmidt arch/powerpc/Kconfig.debug | 9 ++++++ arch/powerpc/mm/init_32.c | 4 +++ arch/powerpc/mm/pgtable_32.c | 52 +++++++++++++++++++++++++++++++++++++++ arch/powerpc/mm/ppc_mmu_32.c | 4 ++- include/asm-powerpc/cacheflush.h | 6 ++++ 5 files changed, 74 insertions(+), 1 deletion(-) Signed-off-by: Paul Mackerras --- include/asm-powerpc/cacheflush.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h index 08e93e78921..ba667a383b8 100644 --- a/include/asm-powerpc/cacheflush.h +++ b/include/asm-powerpc/cacheflush.h @@ -64,6 +64,12 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop); memcpy(dst, src, len) + +#ifdef CONFIG_DEBUG_PAGEALLOC +/* internal debugging function */ +void kernel_map_pages(struct page *page, int numpages, int enable); +#endif + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_CACHEFLUSH_H */ -- cgit v1.2.3 From 57dace2391ba10135e38457904121e7ef34d0c83 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 23 Apr 2007 21:08:15 +0200 Subject: [POWERPC] spufs: make spu page faults not block scheduling Until now, we have always entered the spu page fault handler with a mutex for the spu context held. This has multiple bad side-effects: - it becomes impossible to suspend the context during page faults - if an spu program attempts to access its own mmio areas through DMA, we get an immediate livelock when the nopage function tries to acquire the same mutex This patch makes the page fault logic operate on a struct spu_context instead of a struct spu, and moves it from spu_base.c to a new file fault.c inside of spufs. We now also need to copy the dar and dsisr contents of the last fault into the saved context to have it accessible in case we schedule out the context before activating the page fault handler. Signed-off-by: Arnd Bergmann --- include/asm-powerpc/mmu.h | 1 + include/asm-powerpc/spu_csa.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h index 200055a4b82..e22fd881150 100644 --- a/include/asm-powerpc/mmu.h +++ b/include/asm-powerpc/mmu.h @@ -234,6 +234,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access, unsigned long vsid, pte_t *ptep, unsigned long trap, unsigned int local); struct mm_struct; +extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); extern int hash_huge_page(struct mm_struct *mm, unsigned long access, unsigned long ea, unsigned long vsid, int local, unsigned long trap); diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h index 8aad0619eb8..02e56a6685a 100644 --- a/include/asm-powerpc/spu_csa.h +++ b/include/asm-powerpc/spu_csa.h @@ -242,6 +242,7 @@ struct spu_state { u64 spu_chnldata_RW[32]; u32 spu_mailbox_data[4]; u32 pu_mailbox_data[1]; + u64 dar, dsisr; unsigned long suspend_time; spinlock_t register_lock; }; -- cgit v1.2.3 From 91a69c9646a5b709381d99a171890e77377b1b9c Mon Sep 17 00:00:00 2001 From: Christian Krafft Date: Mon, 23 Apr 2007 21:35:39 +0200 Subject: [POWERPC] cell: add cbe_node_to_cpu function This patch adds code to deal with conversion of logical cpu to cbe nodes. It removes code that assummed there were two logical CPUs per CBE. 
Signed-off-by: Christian Krafft Signed-off-by: Arnd Bergmann --- include/asm-powerpc/cell-pmu.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/cell-pmu.h b/include/asm-powerpc/cell-pmu.h index 35b95773746..8066eede3a0 100644 --- a/include/asm-powerpc/cell-pmu.h +++ b/include/asm-powerpc/cell-pmu.h @@ -97,11 +97,6 @@ extern void cbe_disable_pm_interrupts(u32 cpu); extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu); extern void cbe_sync_irq(int node); -/* Utility functions, macros */ -extern u32 cbe_get_hw_thread_id(int cpu); - -#define cbe_cpu_to_node(cpu) ((cpu) >> 1) - #define CBE_COUNT_SUPERVISOR_MODE 0 #define CBE_COUNT_HYPERVISOR_MODE 1 #define CBE_COUNT_PROBLEM_MODE 2 -- cgit v1.2.3 From 6bf05fd776e38a0a9c17e17c2345b59b1b9aa2cb Mon Sep 17 00:00:00 2001 From: Christian Krafft Date: Mon, 23 Apr 2007 21:35:45 +0200 Subject: [POWERPC] add of_iomap function The of_iomap function maps memory for a given device_node and returns a pointer to that memory. This is used at some places, so it makes sense to a seperate function. Signed-off-by: Christian Krafft Signed-off-by: Arnd Bergmann --- include/asm-powerpc/prom.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index e73a2b482a1..f31af713e6a 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -20,6 +20,7 @@ #include #include #include +#include /* Definitions used by the flattened device tree */ #define OF_DT_HEADER 0xd00dfeed /* marker */ @@ -355,6 +356,16 @@ static inline int of_irq_to_resource(struct device_node *dev, int index, struct return irq; } +static inline void __iomem *of_iomap(struct device_node *np, int index) +{ + struct resource res; + + if (of_address_to_resource(np, index, &res)) + return NULL; + + return ioremap(res.start, 1 + res.end - res.start); +} + #endif /* __KERNEL__ */ #endif /* _POWERPC_PROM_H */ -- cgit v1.2.3 From a14c4508f4bb1bb7772b1976a82646be8d8b515a Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Fri, 13 Apr 2007 04:33:25 +1000 Subject: [POWERPC] Fix PowerPC 750CL and 750GX CPU features PowerPC 750CL has high BATs. The patch below adds a CPU_FTRS_750CL that includes that. Without it, the original firmware mappings in the high BATs aren't cleared which continue to override the linux translations. It also adds CPU_FTR_COMMON to CPU_FTRS_750GX for completeness. 
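Stepping back to the of_iomap() helper introduced two patches above: it folds the usual of_address_to_resource() plus ioremap() sequence into one call, so a typical driver probe path shrinks to something like this (illustrative sketch):

	void __iomem *regs;

	regs = of_iomap(np, 0);		/* maps the node's first "reg" entry */
	if (!regs)
		return -ENOMEM;

	/* ... use in_be32()/out_be32() accesses on regs ... */

	iounmap(regs);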
Signed-off-by: Josh Boyer Signed-off-by: Paul Mackerras --- include/asm-powerpc/cputable.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index e870b539317..4f7c2551864 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -223,6 +223,10 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_PPC_LE) +#define CPU_FTRS_750CL (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ + CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) #define CPU_FTRS_750FX1 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ @@ -235,9 +239,9 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) -#define CPU_FTRS_750GX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \ - CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | \ - CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ +#define CPU_FTRS_750GX (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ + CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ + CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE) #define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \ -- cgit v1.2.3 From 25fc530eed1ca9ccde2a1e96d0b2060867f76bb2 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 18 Apr 2007 16:38:21 +1000 Subject: [POWERPC] pasemi: PA6T oprofile support Oprofile support for PA6T, kernel side. Also rename the PA6T_SPRN.* defines to SPRN_PA6T.*. 
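For illustration (not part of this diff), kernel code accesses the PA6T counters through the usual mfspr()/mtspr() accessors with the renamed constants; a minimal sketch:

	unsigned long mmcr0, count;

	/* Sketch only: enable PMC0 counting in supervisor mode, then read it. */
	mmcr0 = mfspr(SPRN_PA6T_MMCR0);
	mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_EN0 | PA6T_MMCR0_SUPEN);
	count = mfspr(SPRN_PA6T_PMC0);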
Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- include/asm-powerpc/cputable.h | 1 + include/asm-powerpc/oprofile_impl.h | 2 ++ include/asm-powerpc/pmc.h | 1 + include/asm-powerpc/reg.h | 68 +++++++++++++++++++++++++++++++++---- 4 files changed, 66 insertions(+), 6 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 4f7c2551864..434524931ef 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -48,6 +48,7 @@ enum powerpc_oprofile_type { PPC_OPROFILE_G4 = 3, PPC_OPROFILE_BOOKE = 4, PPC_OPROFILE_CELL = 5, + PPC_OPROFILE_PA6T = 6, }; enum powerpc_pmc_type { diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h index 94c0ad2bff9..8d6b47f7b30 100644 --- a/include/asm-powerpc/oprofile_impl.h +++ b/include/asm-powerpc/oprofile_impl.h @@ -57,6 +57,8 @@ extern struct op_powerpc_model op_model_rs64; extern struct op_powerpc_model op_model_power4; extern struct op_powerpc_model op_model_7450; extern struct op_powerpc_model op_model_cell; +extern struct op_powerpc_model op_model_pa6t; + /* All the classic PPC parts use these */ static inline unsigned int classic_ctr_read(unsigned int i) diff --git a/include/asm-powerpc/pmc.h b/include/asm-powerpc/pmc.h index 8588be68e0a..d6a616a1b3e 100644 --- a/include/asm-powerpc/pmc.h +++ b/include/asm-powerpc/pmc.h @@ -30,6 +30,7 @@ void release_pmc_hardware(void); #ifdef CONFIG_PPC64 void power4_enable_pmcs(void); +void pasemi_enable_pmcs(void); #endif #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index 0d7f0164ed8..749c7f953b5 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h @@ -469,12 +469,68 @@ #define SPRN_SIAR 780 #define SPRN_SDAR 781 -#define PA6T_SPRN_PMC0 787 -#define PA6T_SPRN_PMC1 788 -#define PA6T_SPRN_PMC2 789 -#define PA6T_SPRN_PMC3 790 -#define PA6T_SPRN_PMC4 791 -#define PA6T_SPRN_PMC5 792 +#define SPRN_PA6T_MMCR0 795 +#define PA6T_MMCR0_EN0 0x0000000000000001UL +#define PA6T_MMCR0_EN1 0x0000000000000002UL +#define PA6T_MMCR0_EN2 0x0000000000000004UL +#define PA6T_MMCR0_EN3 0x0000000000000008UL +#define PA6T_MMCR0_EN4 0x0000000000000010UL +#define PA6T_MMCR0_EN5 0x0000000000000020UL +#define PA6T_MMCR0_SUPEN 0x0000000000000040UL +#define PA6T_MMCR0_PREN 0x0000000000000080UL +#define PA6T_MMCR0_HYPEN 0x0000000000000100UL +#define PA6T_MMCR0_FCM0 0x0000000000000200UL +#define PA6T_MMCR0_FCM1 0x0000000000000400UL +#define PA6T_MMCR0_INTGEN 0x0000000000000800UL +#define PA6T_MMCR0_INTEN0 0x0000000000001000UL +#define PA6T_MMCR0_INTEN1 0x0000000000002000UL +#define PA6T_MMCR0_INTEN2 0x0000000000004000UL +#define PA6T_MMCR0_INTEN3 0x0000000000008000UL +#define PA6T_MMCR0_INTEN4 0x0000000000010000UL +#define PA6T_MMCR0_INTEN5 0x0000000000020000UL +#define PA6T_MMCR0_DISCNT 0x0000000000040000UL +#define PA6T_MMCR0_UOP 0x0000000000080000UL +#define PA6T_MMCR0_TRG 0x0000000000100000UL +#define PA6T_MMCR0_TRGEN 0x0000000000200000UL +#define PA6T_MMCR0_TRGREG 0x0000000001600000UL +#define PA6T_MMCR0_SIARLOG 0x0000000002000000UL +#define PA6T_MMCR0_SDARLOG 0x0000000004000000UL +#define PA6T_MMCR0_PROEN 0x0000000008000000UL +#define PA6T_MMCR0_PROLOG 0x0000000010000000UL +#define PA6T_MMCR0_DAMEN2 0x0000000020000000UL +#define PA6T_MMCR0_DAMEN3 0x0000000040000000UL +#define PA6T_MMCR0_DAMEN4 0x0000000080000000UL +#define PA6T_MMCR0_DAMEN5 0x0000000100000000UL +#define PA6T_MMCR0_DAMSEL2 0x0000000200000000UL +#define PA6T_MMCR0_DAMSEL3 
0x0000000400000000UL +#define PA6T_MMCR0_DAMSEL4 0x0000000800000000UL +#define PA6T_MMCR0_DAMSEL5 0x0000001000000000UL +#define PA6T_MMCR0_HANDDIS 0x0000002000000000UL +#define PA6T_MMCR0_PCTEN 0x0000004000000000UL +#define PA6T_MMCR0_SOCEN 0x0000008000000000UL +#define PA6T_MMCR0_SOCMOD 0x0000010000000000UL + +#define SPRN_PA6T_MMCR1 798 +#define PA6T_MMCR1_ES2 0x00000000000000ffUL +#define PA6T_MMCR1_ES3 0x000000000000ff00UL +#define PA6T_MMCR1_ES4 0x0000000000ff0000UL +#define PA6T_MMCR1_ES5 0x00000000ff000000UL + +#define SPRN_PA6T_SIAR 780 +#define SPRN_PA6T_UPMC0 771 +#define SPRN_PA6T_UPMC1 772 +#define SPRN_PA6T_UPMC2 773 +#define SPRN_PA6T_UPMC3 774 +#define SPRN_PA6T_UPMC4 775 +#define SPRN_PA6T_UPMC5 776 +#define SPRN_PA6T_UMMCR0 779 +#define SPRN_PA6T_UMMCR1 782 +#define SPRN_PA6T_PMC0 787 +#define SPRN_PA6T_PMC1 788 +#define SPRN_PA6T_PMC2 789 +#define SPRN_PA6T_PMC3 790 +#define SPRN_PA6T_PMC4 791 +#define SPRN_PA6T_PMC5 792 #else /* 32-bit */ #define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */ -- cgit v1.2.3 From e6349a958b3577da6e5c5eacda85c07f9a364cb5 Mon Sep 17 00:00:00 2001 From: Ananth N Mavinakayanahalli Date: Wed, 18 Apr 2007 15:57:51 +1000 Subject: [POWERPC] kprobes: Eliminate sstep exception if instruction can be emulated For cases when probes are placed on instructions that can be emulated, don't take the single-step exception. Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Paul Mackerras --- include/asm-powerpc/kprobes.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h index 3a5dd492588..f850ca7020e 100644 --- a/include/asm-powerpc/kprobes.h +++ b/include/asm-powerpc/kprobes.h @@ -87,6 +87,11 @@ extern void arch_remove_kprobe(struct kprobe *p); struct arch_specific_insn { /* copy of original instruction */ kprobe_opcode_t *insn; + /* + * Set in kprobes code, initially to 0. If the instruction can be + * eumulated, this is set to 1, if not, to -1. + */ + int boostable; }; struct prev_kprobe { -- cgit v1.2.3 From e58923ed14370e0facc5eb2c3923216adc3bf260 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Wed, 18 Apr 2007 16:36:26 +1000 Subject: [POWERPC] Add arch/powerpc driver for UIC, PPC4xx interrupt controller This patch adds a driver to arch/powerpc/sysdev for the UIC, the on-chip interrupt controller from IBM/AMCC 4xx chips. It uses the new irq host mapping infrastructure. Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- include/asm-powerpc/uic.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 include/asm-powerpc/uic.h (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/uic.h b/include/asm-powerpc/uic.h new file mode 100644 index 00000000000..970eb7e2186 --- /dev/null +++ b/include/asm-powerpc/uic.h @@ -0,0 +1,23 @@ +/* + * include/asm-powerpc/uic.h + * + * IBM PPC4xx UIC external definitions and structure. + * + * Maintainer: David Gibson + * Copyright 2007 IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _ASM_POWERPC_UIC_H +#define _ASM_POWERPC_UIC_H + +#ifdef __KERNEL__ + +extern void __init uic_init_tree(void); +extern unsigned int uic_get_irq(void); + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_UIC_H */ -- cgit v1.2.3 From 6cfef5b27e49e826125f12637ee0d7210a896044 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 23 Apr 2007 18:47:08 +1000 Subject: [POWERPC] Rename MPIC_BROKEN_U3 to MPIC_U3_HT_IRQS Rename MPIC_BROKEN_U3 to something a little more descriptive. Its effect is to enable support for HT irqs behind the PCI-X/HT bridge on U3/U4 (aka. CPC9x5) parts. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- include/asm-powerpc/mpic.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h index cb204a71e91..e4d5fc5362a 100644 --- a/include/asm-powerpc/mpic.h +++ b/include/asm-powerpc/mpic.h @@ -199,7 +199,7 @@ enum { }; -#ifdef CONFIG_MPIC_BROKEN_U3 +#ifdef CONFIG_MPIC_U3_HT_IRQS /* Fixup table entry */ struct mpic_irq_fixup { @@ -208,7 +208,7 @@ struct mpic_irq_fixup u32 data; unsigned int index; }; -#endif /* CONFIG_MPIC_BROKEN_U3 */ +#endif /* CONFIG_MPIC_U3_HT_IRQS */ enum mpic_reg_type { @@ -239,7 +239,7 @@ struct mpic /* The "linux" controller struct */ struct irq_chip hc_irq; -#ifdef CONFIG_MPIC_BROKEN_U3 +#ifdef CONFIG_MPIC_U3_HT_IRQS struct irq_chip hc_ht_irq; #endif #ifdef CONFIG_SMP @@ -268,7 +268,7 @@ struct mpic /* Spurious vector to program into unused sources */ unsigned int spurious_vec; -#ifdef CONFIG_MPIC_BROKEN_U3 +#ifdef CONFIG_MPIC_U3_HT_IRQS /* The fixup table */ struct mpic_irq_fixup *fixups; spinlock_t fixup_lock; @@ -313,7 +313,7 @@ struct mpic /* Set this for a big-endian MPIC */ #define MPIC_BIG_ENDIAN 0x00000002 /* Broken U3 MPIC */ -#define MPIC_BROKEN_U3 0x00000004 +#define MPIC_U3_HT_IRQS 0x00000004 /* Broken IPI registers (autodetected) */ #define MPIC_BROKEN_IPI 0x00000008 /* MPIC wants a reset */ @@ -352,7 +352,7 @@ struct mpic * @senses_num: number of entries in the array * * Note about the sense array. If none is passed, all interrupts are - * setup to be level negative unless MPIC_BROKEN_U3 is set in which + * setup to be level negative unless MPIC_U3_HT_IRQS is set in which * case they are edge positive (and the array is ignored anyway). * The values in the array start at the first source of the MPIC, * that is senses[0] correspond to linux irq "irq_offset". -- cgit v1.2.3 From 687304014f7ca8e2fbb3feaefef356b4a0da65ad Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Tue, 24 Apr 2007 01:11:55 +1000 Subject: [POWERPC] Save trap number in bad_stack Save the trap number in the case of getting a bad stack in an exception handler. It is sometimes useful to know what exception it was that caused this to happen. Without this, no trap number is reported. 
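As a sketch of the intent (not the actual exception-path assembly): the bad-stack entry code stores the exception vector into the new field, and the C-level handler can then include it in its report. Assuming access to the current CPU's paca via local_paca, the reporting side looks roughly like:

	/* Sketch only: report the saved trap number from the bad-stack path. */
	printk(KERN_EMERG "Bad kernel stack pointer, trap vector: 0x%x\n",
	       (unsigned int)local_paca->trap_save);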
Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- include/asm-powerpc/paca.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h index 4de851d91f9..cf95274f735 100644 --- a/include/asm-powerpc/paca.h +++ b/include/asm-powerpc/paca.h @@ -94,6 +94,7 @@ struct paca_struct { u64 stab_rr; /* stab/slb round-robin counter */ u64 saved_r1; /* r1 save for RTAS calls */ u64 saved_msr; /* MSR saved here by enter_rtas */ + u16 trap_save; /* Used when bad stack is encountered */ u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ -- cgit v1.2.3 From 621023072524fc0155ed16490255e1ea3aa11585 Mon Sep 17 00:00:00 2001 From: David Gibson Date: Tue, 24 Apr 2007 13:09:12 +1000 Subject: [POWERPC] Cleanup and fix breakage in tlbflush.h BenH's commit a741e67969577163a4cfc78d7fd2753219087ef1 in powerpc.git, although (AFAICT) only intended to affect ppc64, also has side-effects which break 44x. I think 40x, 8xx and Freescale Book E are also affected, though I haven't tested them. The problem lies in unconditionally removing flush_tlb_pending() from the versions of flush_tlb_mm(), flush_tlb_range() and flush_tlb_kernel_range() used on ppc64 - which are also used the embedded platforms mentioned above. The patch below cleans up the convoluted #ifdef logic in tlbflush.h, in the process restoring the necessary flushes for the software TLB platforms. There are three sets of definitions for the flushing hooks: the software TLB versions (revised to avoid using names which appear to related to TLB batching), the 32-bit hash based versions (external functions) amd the 64-bit hash based versions (which implement batching). It also moves the declaration of update_mmu_cache() to always be in tlbflush.h (previously it was in tlbflush.h except for PPC64, where it was in pgtable.h). Booted on Ebony (440GP) and compiled for 64-bit and 32-bit multiplatform. Signed-off-by: David Gibson Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- include/asm-powerpc/pgtable.h | 10 --- include/asm-powerpc/tlbflush.h | 135 ++++++++++++++++++++++++----------------- 2 files changed, 80 insertions(+), 65 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h index c7142c7e0e0..19edb6982b8 100644 --- a/include/asm-powerpc/pgtable.h +++ b/include/asm-powerpc/pgtable.h @@ -448,16 +448,6 @@ extern pgd_t swapper_pg_dir[]; extern void paging_init(void); -/* - * This gets called at the end of handling a page fault, when - * the kernel has put a new PTE into the page table for the process. - * We use it to put a corresponding HPTE into the hash table - * ahead of time, instead of waiting for the inevitable extra - * hash-table miss exception. 
- */ -struct vm_area_struct; -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); - /* Encode and de-code a swap entry */ #define __swp_type(entry) (((entry).val >> 1) & 0x3f) #define __swp_offset(entry) ((entry).val >> 8) diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h index 0bc5a5e506b..86e6266a028 100644 --- a/include/asm-powerpc/tlbflush.h +++ b/include/asm-powerpc/tlbflush.h @@ -17,10 +17,73 @@ */ #ifdef __KERNEL__ - struct mm_struct; +struct vm_area_struct; + +#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE) +/* + * TLB flushing for software loaded TLB chips + * + * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & + * flush_tlb_kernel_range are best implemented as tlbia vs + * specific tlbie's + */ + +extern void _tlbie(unsigned long address); + +#if defined(CONFIG_40x) || defined(CONFIG_8xx) +#define _tlbia() asm volatile ("tlbia; sync" : : : "memory") +#else /* CONFIG_44x || CONFIG_FSL_BOOKE */ +extern void _tlbia(void); +#endif -#ifdef CONFIG_PPC64 +static inline void flush_tlb_mm(struct mm_struct *mm) +{ + _tlbia(); +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ + _tlbie(vmaddr); +} + +static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, + unsigned long vmaddr) +{ + _tlbie(vmaddr); +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + _tlbia(); +} + +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + _tlbia(); +} + +#elif defined(CONFIG_PPC32) +/* + * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx + */ +extern void _tlbie(unsigned long address); +extern void _tlbia(void); + +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +#else +/* + * TLB flushing for 64-bit has-MMU CPUs + */ #include #include @@ -67,89 +130,51 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local); extern void flush_hash_range(unsigned long number, int local); -#else /* CONFIG_PPC64 */ - -#include - -extern void _tlbie(unsigned long address); -extern void _tlbia(void); - -/* - * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & - * flush_tlb_kernel_range are best implemented as tlbia vs - * specific tlbie's - */ - -#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx) -#define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory") -#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE) -#define flush_tlb_pending() _tlbia() -#endif - -/* - * This gets called at the end of handling a page fault, when - * the kernel has put a new PTE into the page table for the process. - * We use it to ensure coherency between the i-cache and d-cache - * for the page which has just been mapped in. - * On machines which use an MMU hash table, we use this to put a - * corresponding HPTE into the hash table ahead of time, instead of - * waiting for the inevitable extra hash-table miss exception. 
- */ -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); - -#endif /* CONFIG_PPC64 */ - -#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \ - defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx) static inline void flush_tlb_mm(struct mm_struct *mm) { } static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long vmaddr) + unsigned long vmaddr) { -#ifndef CONFIG_PPC64 - _tlbie(vmaddr); -#endif } static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long vmaddr) { -#ifndef CONFIG_PPC64 - _tlbie(vmaddr); -#endif } static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end) { } static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) + unsigned long end) { } -#else /* 6xx, 7xx, 7xxx cpus */ - -extern void flush_tlb_mm(struct mm_struct *mm); -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); -extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end); -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); - #endif +/* + * This gets called at the end of handling a page fault, when + * the kernel has put a new PTE into the page table for the process. + * We use it to ensure coherency between the i-cache and d-cache + * for the page which has just been mapped in. + * On machines which use an MMU hash table, we use this to put a + * corresponding HPTE into the hash table ahead of time, instead of + * waiting for the inevitable extra hash-table miss exception. + */ +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); + /* * This is called in munmap when we have freed up some page-table * pages. We don't need to do anything here, there's nothing special * about our page-table pages. -- paulus */ static inline void flush_tlb_pgtables(struct mm_struct *mm, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end) { } -- cgit v1.2.3 From 4bf56e1725a298fb430977cf143ad3a36c91b46a Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 24 Apr 2007 13:48:41 +1000 Subject: [POWERPC] Remove find_compatible_devices This is an old interface and is replaced by of_find_compatible_node. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index f31af713e6a..58eabb2fa24 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -116,8 +116,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e extern struct device_node *find_devices(const char *name); extern struct device_node *find_type_devices(const char *type); extern struct device_node *find_path_device(const char *path); -extern struct device_node *find_compatible_devices(const char *type, - const char *compat); extern struct device_node *find_all_nodes(void); /* New style node lookup */ -- cgit v1.2.3 From 112466b4d0036b3244509d01dbbf3c8caec52a23 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 24 Apr 2007 13:49:47 +1000 Subject: [POWERPC] Remove find_all_nodes This old interface has no more users. 
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 58eabb2fa24..a9acdd8b3bb 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -116,7 +116,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e extern struct device_node *find_devices(const char *name); extern struct device_node *find_type_devices(const char *type); extern struct device_node *find_path_device(const char *path); -extern struct device_node *find_all_nodes(void); /* New style node lookup */ extern struct device_node *of_find_node_by_name(struct device_node *from, -- cgit v1.2.3 From 8c8dc322486d5394dc981bef9276dd0ce6c8d1ce Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 24 Apr 2007 13:50:55 +1000 Subject: [POWERPC] Remove old interface find_path_device Replaced by of_find_node_by_path. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index a9acdd8b3bb..a070defc443 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -115,7 +115,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e /* OBSOLETE: Old style node lookup */ extern struct device_node *find_devices(const char *name); extern struct device_node *find_type_devices(const char *type); -extern struct device_node *find_path_device(const char *path); /* New style node lookup */ extern struct device_node *of_find_node_by_name(struct device_node *from, -- cgit v1.2.3 From 1658ab66781d918f604c6069c5cf9a94b6f52f84 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 24 Apr 2007 13:51:59 +1000 Subject: [POWERPC] Remove old interface find_type_devices Replaced by of_find_node_by_type. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index a070defc443..990489cac9b 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -114,7 +114,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e /* OBSOLETE: Old style node lookup */ extern struct device_node *find_devices(const char *name); -extern struct device_node *find_type_devices(const char *type); /* New style node lookup */ extern struct device_node *of_find_node_by_name(struct device_node *from, -- cgit v1.2.3 From 30686ba6d56858657829d3eb524ed73e5dc98d2b Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 24 Apr 2007 13:53:04 +1000 Subject: [POWERPC] Remove old interface find_devices Replace uses with of_find_node_by_name and for_each_node_by_name. 
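A typical conversion of a caller looks roughly like this ("adb" and do_something_with() are purely illustrative):

	struct device_node *np;

	/* Walk every node with a matching name. */
	for_each_node_by_name(np, "adb")
		do_something_with(np);

	/* Or fetch a single node; the reference must be dropped afterwards. */
	np = of_find_node_by_name(NULL, "adb");
	if (np) {
		do_something_with(np);
		of_node_put(np);
	}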
Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- include/asm-powerpc/prom.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 990489cac9b..ec400f608e1 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -112,9 +112,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e } -/* OBSOLETE: Old style node lookup */ -extern struct device_node *find_devices(const char *name); - /* New style node lookup */ extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); -- cgit v1.2.3 From 8d2169e8d6b8a91413df33bc402e0f602ceaabcc Mon Sep 17 00:00:00 2001 From: David Gibson Date: Fri, 27 Apr 2007 11:53:52 +1000 Subject: [POWERPC] Prepare for splitting up mmu.h by MMU type Currently asm-powerpc/mmu.h has definitions for the 64-bit hash based MMU. If CONFIG_PPC64 is not set, it instead includes asm-ppc/mmu.h which contains a particularly horrible mess of #ifdefs giving the definitions for all the various 32-bit MMUs. It would be nice to have the low level definitions for each MMU type neatly in their own separate files. It would also be good to wean arch/powerpc off dependence on the old asm-ppc/mmu.h. This patch makes a start on such a cleanup by moving the definitions for the 64-bit hash MMU to their own file, asm-powerpc/mmu_hash64.h. Definitions for the other MMUs still all come from asm-ppc/mmu.h, however each MMU type can now be one-by-one moved over to their own file, in the process cleaning them up stripping them of cruft no longer necessary in arch/powerpc. Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- include/asm-powerpc/mmu-hash64.h | 400 ++++++++++++++++++++++++++++++++++++++ include/asm-powerpc/mmu.h | 406 +-------------------------------------- 2 files changed, 406 insertions(+), 400 deletions(-) create mode 100644 include/asm-powerpc/mmu-hash64.h (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h new file mode 100644 index 00000000000..6739457d8bc --- /dev/null +++ b/include/asm-powerpc/mmu-hash64.h @@ -0,0 +1,400 @@ +#ifndef _ASM_POWERPC_MMU_HASH64_H_ +#define _ASM_POWERPC_MMU_HASH64_H_ +/* + * PowerPC64 memory management structures + * + * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com> + * PPC64 rework. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include + +/* + * Segment table + */ + +#define STE_ESID_V 0x80 +#define STE_ESID_KS 0x20 +#define STE_ESID_KP 0x10 +#define STE_ESID_N 0x08 + +#define STE_VSID_SHIFT 12 + +/* Location of cpu0's segment table */ +#define STAB0_PAGE 0x6 +#define STAB0_OFFSET (STAB0_PAGE << 12) +#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) + +#ifndef __ASSEMBLY__ +extern char initial_stab[]; +#endif /* ! 
__ASSEMBLY */ + +/* + * SLB + */ + +#define SLB_NUM_BOLTED 3 +#define SLB_CACHE_ENTRIES 8 + +/* Bits in the SLB ESID word */ +#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */ + +/* Bits in the SLB VSID word */ +#define SLB_VSID_SHIFT 12 +#define SLB_VSID_B ASM_CONST(0xc000000000000000) +#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) +#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) +#define SLB_VSID_KS ASM_CONST(0x0000000000000800) +#define SLB_VSID_KP ASM_CONST(0x0000000000000400) +#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ +#define SLB_VSID_L ASM_CONST(0x0000000000000100) +#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ +#define SLB_VSID_LP ASM_CONST(0x0000000000000030) +#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000) +#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010) +#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020) +#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030) +#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP) + +#define SLB_VSID_KERNEL (SLB_VSID_KP) +#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) + +#define SLBIE_C (0x08000000) + +/* + * Hash table + */ + +#define HPTES_PER_GROUP 8 + +#define HPTE_V_AVPN_SHIFT 7 +#define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) +#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) +#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) +#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) +#define HPTE_V_LOCK ASM_CONST(0x0000000000000008) +#define HPTE_V_LARGE ASM_CONST(0x0000000000000004) +#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002) +#define HPTE_V_VALID ASM_CONST(0x0000000000000001) + +#define HPTE_R_PP0 ASM_CONST(0x8000000000000000) +#define HPTE_R_TS ASM_CONST(0x4000000000000000) +#define HPTE_R_RPN_SHIFT 12 +#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) +#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) +#define HPTE_R_PP ASM_CONST(0x0000000000000003) +#define HPTE_R_N ASM_CONST(0x0000000000000004) +#define HPTE_R_C ASM_CONST(0x0000000000000080) +#define HPTE_R_R ASM_CONST(0x0000000000000100) + +/* Values for PP (assumes Ks=0, Kp=1) */ +/* pp0 will always be 0 for linux */ +#define PP_RWXX 0 /* Supervisor read/write, User none */ +#define PP_RWRX 1 /* Supervisor read/write, User read */ +#define PP_RWRW 2 /* Supervisor read/write, User read/write */ +#define PP_RXRX 3 /* Supervisor read, User read */ + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned long v; + unsigned long r; +} hpte_t; + +extern hpte_t *htab_address; +extern unsigned long htab_size_bytes; +extern unsigned long htab_hash_mask; + +/* + * Page size definition + * + * shift : is the "PAGE_SHIFT" value for that page size + * sllp : is a bit mask with the value of SLB L || LP to be or'ed + * directly to a slbmte "vsid" value + * penc : is the HPTE encoding mask for the "LP" field: + * + */ +struct mmu_psize_def +{ + unsigned int shift; /* number of bits */ + unsigned int penc; /* HPTE encoding */ + unsigned int tlbiel; /* tlbiel supported for that page size */ + unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ + unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ +}; + +#endif /* __ASSEMBLY__ */ + +/* + * The kernel use the constants below to index in the page sizes array. + * The use of fixed constants for this purpose is better for performances + * of the low level hash refill handlers. + * + * A non supported page size has a "shift" field set to 0 + * + * Any new page size being implemented can get a new entry in here. 
Whether + * the kernel will use it or not is a different matter though. The actual page + * size used by hugetlbfs is not defined here and may be made variable + */ + +#define MMU_PAGE_4K 0 /* 4K */ +#define MMU_PAGE_64K 1 /* 64K */ +#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */ +#define MMU_PAGE_1M 3 /* 1M */ +#define MMU_PAGE_16M 4 /* 16M */ +#define MMU_PAGE_16G 5 /* 16G */ +#define MMU_PAGE_COUNT 6 + +#ifndef __ASSEMBLY__ + +/* + * The current system page sizes + */ +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; +extern int mmu_linear_psize; +extern int mmu_virtual_psize; +extern int mmu_vmalloc_psize; +extern int mmu_io_psize; + +/* + * If the processor supports 64k normal pages but not 64k cache + * inhibited pages, we have to be prepared to switch processes + * to use 4k pages when they create cache-inhibited mappings. + * If this is the case, mmu_ci_restrictions will be set to 1. + */ +extern int mmu_ci_restrictions; + +#ifdef CONFIG_HUGETLB_PAGE +/* + * The page size index of the huge pages for use by hugetlbfs + */ +extern int mmu_huge_psize; + +#endif /* CONFIG_HUGETLB_PAGE */ + +/* + * This function sets the AVPN and L fields of the HPTE appropriately + * for the page size + */ +static inline unsigned long hpte_encode_v(unsigned long va, int psize) +{ + unsigned long v = + v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); + v <<= HPTE_V_AVPN_SHIFT; + if (psize != MMU_PAGE_4K) + v |= HPTE_V_LARGE; + return v; +} + +/* + * This function sets the ARPN, and LP fields of the HPTE appropriately + * for the page size. We assume the pa is already "clean" that is properly + * aligned for the requested page size + */ +static inline unsigned long hpte_encode_r(unsigned long pa, int psize) +{ + unsigned long r; + + /* A 4K page needs no special encoding */ + if (psize == MMU_PAGE_4K) + return pa & HPTE_R_RPN; + else { + unsigned int penc = mmu_psize_defs[psize].penc; + unsigned int shift = mmu_psize_defs[psize].shift; + return (pa & ~((1ul << shift) - 1)) | (penc << 12); + } + return r; +} + +/* + * This hashes a virtual address for a 256Mb segment only for now + */ + +static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) +{ + return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); +} + +extern int __hash_page_4K(unsigned long ea, unsigned long access, + unsigned long vsid, pte_t *ptep, unsigned long trap, + unsigned int local); +extern int __hash_page_64K(unsigned long ea, unsigned long access, + unsigned long vsid, pte_t *ptep, unsigned long trap, + unsigned int local); +struct mm_struct; +extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); +extern int hash_huge_page(struct mm_struct *mm, unsigned long access, + unsigned long ea, unsigned long vsid, int local, + unsigned long trap); + +extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, + unsigned long pstart, unsigned long mode, + int psize); + +extern void htab_initialize(void); +extern void htab_initialize_secondary(void); +extern void hpte_init_native(void); +extern void hpte_init_lpar(void); +extern void hpte_init_iSeries(void); +extern void hpte_init_beat(void); + +extern void stabs_alloc(void); +extern void slb_initialize(void); +extern void slb_flush_and_rebolt(void); +extern void stab_initialize(unsigned long stab); + +#endif /* __ASSEMBLY__ */ + +/* + * VSID allocation + * + * We first generate a 36-bit "proto-VSID". 
For kernel addresses this + * is equal to the ESID, for user addresses it is: + * (context << 15) | (esid & 0x7fff) + * + * The two forms are distinguishable because the top bit is 0 for user + * addresses, whereas the top two bits are 1 for kernel addresses. + * Proto-VSIDs with the top two bits equal to 0b10 are reserved for + * now. + * + * The proto-VSIDs are then scrambled into real VSIDs with the + * multiplicative hash: + * + * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS + * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7 + * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF + * + * This scramble is only well defined for proto-VSIDs below + * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are + * reserved. VSID_MULTIPLIER is prime, so in particular it is + * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. + * Because the modulus is 2^n-1 we can compute it efficiently without + * a divide or extra multiply (see below). + * + * This scheme has several advantages over older methods: + * + * - We have VSIDs allocated for every kernel address + * (i.e. everything above 0xC000000000000000), except the very top + * segment, which simplifies several things. + * + * - We allow for 15 significant bits of ESID and 20 bits of + * context for user addresses. i.e. 8T (43 bits) of address space for + * up to 1M contexts (although the page table structure and context + * allocation will need changes to take advantage of this). + * + * - The scramble function gives robust scattering in the hash + * table (at least based on some initial results). The previous + * method was more susceptible to pathological cases giving excessive + * hash collisions. + */ +/* + * WARNING - If you change these you must make sure the asm + * implementations in slb_allocate (slb_low.S), do_stab_bolted + * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. + * + * You'll also need to change the precomputed VSID values in head.S + * which are used by the iSeries firmware. + */ + +#define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */ +#define VSID_BITS 36 +#define VSID_MODULUS ((1UL<= \ + * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ + * the bit clear, r3 already has the answer we want, if it \ + * doesn't, the answer is the low 36 bits of r3+1. So in all \ + * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\ + addi rx,rt,1; \ + srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \ + add rt,rt,rx + + +#ifndef __ASSEMBLY__ + +typedef unsigned long mm_context_id_t; + +typedef struct { + mm_context_id_t id; + u16 user_psize; /* page size index */ + u16 sllp; /* SLB entry page size encoding */ +#ifdef CONFIG_HUGETLB_PAGE + u16 low_htlb_areas, high_htlb_areas; +#endif + unsigned long vdso_base; +} mm_context_t; + + +static inline unsigned long vsid_scramble(unsigned long protovsid) +{ +#if 0 + /* The code below is equivalent to this function for arguments + * < 2^VSID_BITS, which is all this should ever be called + * with. However gcc is not clever enough to compute the + * modulus (2^n-1) without a second multiply. 
*/ + return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); +#else /* 1 */ + unsigned long x; + + x = protovsid * VSID_MULTIPLIER; + x = (x >> VSID_BITS) + (x & VSID_MODULUS); + return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; +#endif /* 1 */ +} + +/* This is only valid for addresses >= KERNELBASE */ +static inline unsigned long get_kernel_vsid(unsigned long ea) +{ + return vsid_scramble(ea >> SID_SHIFT); +} + +/* This is only valid for user addresses (which are below 2^41) */ +static inline unsigned long get_vsid(unsigned long context, unsigned long ea) +{ + return vsid_scramble((context << USER_ESID_BITS) + | (ea >> SID_SHIFT)); +} + +#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) +#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) + +/* Physical address used by some IO functions */ +typedef unsigned long phys_addr_t; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_MMU_HASH64_H_ */ diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h index e22fd881150..06b3e6d336c 100644 --- a/include/asm-powerpc/mmu.h +++ b/include/asm-powerpc/mmu.h @@ -2,408 +2,14 @@ #define _ASM_POWERPC_MMU_H_ #ifdef __KERNEL__ -#ifndef CONFIG_PPC64 -#include +#ifdef CONFIG_PPC64 +/* 64-bit classic hash table MMU */ +# include #else - -/* - * PowerPC memory management structures - * - * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com> - * PPC64 rework. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include - -/* - * Segment table - */ - -#define STE_ESID_V 0x80 -#define STE_ESID_KS 0x20 -#define STE_ESID_KP 0x10 -#define STE_ESID_N 0x08 - -#define STE_VSID_SHIFT 12 - -/* Location of cpu0's segment table */ -#define STAB0_PAGE 0x6 -#define STAB0_OFFSET (STAB0_PAGE << 12) -#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) - -#ifndef __ASSEMBLY__ -extern char initial_stab[]; -#endif /* ! 
__ASSEMBLY */ - -/* - * SLB - */ - -#define SLB_NUM_BOLTED 3 -#define SLB_CACHE_ENTRIES 8 - -/* Bits in the SLB ESID word */ -#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */ - -/* Bits in the SLB VSID word */ -#define SLB_VSID_SHIFT 12 -#define SLB_VSID_B ASM_CONST(0xc000000000000000) -#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000) -#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000) -#define SLB_VSID_KS ASM_CONST(0x0000000000000800) -#define SLB_VSID_KP ASM_CONST(0x0000000000000400) -#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */ -#define SLB_VSID_L ASM_CONST(0x0000000000000100) -#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */ -#define SLB_VSID_LP ASM_CONST(0x0000000000000030) -#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000) -#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010) -#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020) -#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030) -#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP) - -#define SLB_VSID_KERNEL (SLB_VSID_KP) -#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C) - -#define SLBIE_C (0x08000000) - -/* - * Hash table - */ - -#define HPTES_PER_GROUP 8 - -#define HPTE_V_AVPN_SHIFT 7 -#define HPTE_V_AVPN ASM_CONST(0xffffffffffffff80) -#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT) -#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & HPTE_V_AVPN)) -#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010) -#define HPTE_V_LOCK ASM_CONST(0x0000000000000008) -#define HPTE_V_LARGE ASM_CONST(0x0000000000000004) -#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002) -#define HPTE_V_VALID ASM_CONST(0x0000000000000001) - -#define HPTE_R_PP0 ASM_CONST(0x8000000000000000) -#define HPTE_R_TS ASM_CONST(0x4000000000000000) -#define HPTE_R_RPN_SHIFT 12 -#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000) -#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff) -#define HPTE_R_PP ASM_CONST(0x0000000000000003) -#define HPTE_R_N ASM_CONST(0x0000000000000004) -#define HPTE_R_C ASM_CONST(0x0000000000000080) -#define HPTE_R_R ASM_CONST(0x0000000000000100) - -/* Values for PP (assumes Ks=0, Kp=1) */ -/* pp0 will always be 0 for linux */ -#define PP_RWXX 0 /* Supervisor read/write, User none */ -#define PP_RWRX 1 /* Supervisor read/write, User read */ -#define PP_RWRW 2 /* Supervisor read/write, User read/write */ -#define PP_RXRX 3 /* Supervisor read, User read */ - -#ifndef __ASSEMBLY__ - -typedef struct { - unsigned long v; - unsigned long r; -} hpte_t; - -extern hpte_t *htab_address; -extern unsigned long htab_size_bytes; -extern unsigned long htab_hash_mask; - -/* - * Page size definition - * - * shift : is the "PAGE_SHIFT" value for that page size - * sllp : is a bit mask with the value of SLB L || LP to be or'ed - * directly to a slbmte "vsid" value - * penc : is the HPTE encoding mask for the "LP" field: - * - */ -struct mmu_psize_def -{ - unsigned int shift; /* number of bits */ - unsigned int penc; /* HPTE encoding */ - unsigned int tlbiel; /* tlbiel supported for that page size */ - unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */ - unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */ -}; - -#endif /* __ASSEMBLY__ */ - -/* - * The kernel use the constants below to index in the page sizes array. - * The use of fixed constants for this purpose is better for performances - * of the low level hash refill handlers. - * - * A non supported page size has a "shift" field set to 0 - * - * Any new page size being implemented can get a new entry in here. 
Whether - * the kernel will use it or not is a different matter though. The actual page - * size used by hugetlbfs is not defined here and may be made variable - */ - -#define MMU_PAGE_4K 0 /* 4K */ -#define MMU_PAGE_64K 1 /* 64K */ -#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */ -#define MMU_PAGE_1M 3 /* 1M */ -#define MMU_PAGE_16M 4 /* 16M */ -#define MMU_PAGE_16G 5 /* 16G */ -#define MMU_PAGE_COUNT 6 - -#ifndef __ASSEMBLY__ - -/* - * The current system page sizes - */ -extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; -extern int mmu_linear_psize; -extern int mmu_virtual_psize; -extern int mmu_vmalloc_psize; -extern int mmu_io_psize; - -/* - * If the processor supports 64k normal pages but not 64k cache - * inhibited pages, we have to be prepared to switch processes - * to use 4k pages when they create cache-inhibited mappings. - * If this is the case, mmu_ci_restrictions will be set to 1. - */ -extern int mmu_ci_restrictions; - -#ifdef CONFIG_HUGETLB_PAGE -/* - * The page size index of the huge pages for use by hugetlbfs - */ -extern int mmu_huge_psize; - -#endif /* CONFIG_HUGETLB_PAGE */ - -/* - * This function sets the AVPN and L fields of the HPTE appropriately - * for the page size - */ -static inline unsigned long hpte_encode_v(unsigned long va, int psize) -{ - unsigned long v = - v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm); - v <<= HPTE_V_AVPN_SHIFT; - if (psize != MMU_PAGE_4K) - v |= HPTE_V_LARGE; - return v; -} - -/* - * This function sets the ARPN, and LP fields of the HPTE appropriately - * for the page size. We assume the pa is already "clean" that is properly - * aligned for the requested page size - */ -static inline unsigned long hpte_encode_r(unsigned long pa, int psize) -{ - unsigned long r; - - /* A 4K page needs no special encoding */ - if (psize == MMU_PAGE_4K) - return pa & HPTE_R_RPN; - else { - unsigned int penc = mmu_psize_defs[psize].penc; - unsigned int shift = mmu_psize_defs[psize].shift; - return (pa & ~((1ul << shift) - 1)) | (penc << 12); - } - return r; -} - -/* - * This hashes a virtual address for a 256Mb segment only for now - */ - -static inline unsigned long hpt_hash(unsigned long va, unsigned int shift) -{ - return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift); -} - -extern int __hash_page_4K(unsigned long ea, unsigned long access, - unsigned long vsid, pte_t *ptep, unsigned long trap, - unsigned int local); -extern int __hash_page_64K(unsigned long ea, unsigned long access, - unsigned long vsid, pte_t *ptep, unsigned long trap, - unsigned int local); -struct mm_struct; -extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); -extern int hash_huge_page(struct mm_struct *mm, unsigned long access, - unsigned long ea, unsigned long vsid, int local, - unsigned long trap); - -extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, - unsigned long pstart, unsigned long mode, - int psize); - -extern void htab_initialize(void); -extern void htab_initialize_secondary(void); -extern void hpte_init_native(void); -extern void hpte_init_lpar(void); -extern void hpte_init_iSeries(void); -extern void hpte_init_beat(void); - -extern void stabs_alloc(void); -extern void slb_initialize(void); -extern void slb_flush_and_rebolt(void); -extern void stab_initialize(unsigned long stab); - -#endif /* __ASSEMBLY__ */ - -/* - * VSID allocation - * - * We first generate a 36-bit "proto-VSID". 
For kernel addresses this - * is equal to the ESID, for user addresses it is: - * (context << 15) | (esid & 0x7fff) - * - * The two forms are distinguishable because the top bit is 0 for user - * addresses, whereas the top two bits are 1 for kernel addresses. - * Proto-VSIDs with the top two bits equal to 0b10 are reserved for - * now. - * - * The proto-VSIDs are then scrambled into real VSIDs with the - * multiplicative hash: - * - * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS - * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7 - * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF - * - * This scramble is only well defined for proto-VSIDs below - * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are - * reserved. VSID_MULTIPLIER is prime, so in particular it is - * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. - * Because the modulus is 2^n-1 we can compute it efficiently without - * a divide or extra multiply (see below). - * - * This scheme has several advantages over older methods: - * - * - We have VSIDs allocated for every kernel address - * (i.e. everything above 0xC000000000000000), except the very top - * segment, which simplifies several things. - * - * - We allow for 15 significant bits of ESID and 20 bits of - * context for user addresses. i.e. 8T (43 bits) of address space for - * up to 1M contexts (although the page table structure and context - * allocation will need changes to take advantage of this). - * - * - The scramble function gives robust scattering in the hash - * table (at least based on some initial results). The previous - * method was more susceptible to pathological cases giving excessive - * hash collisions. - */ -/* - * WARNING - If you change these you must make sure the asm - * implementations in slb_allocate (slb_low.S), do_stab_bolted - * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. - * - * You'll also need to change the precomputed VSID values in head.S - * which are used by the iSeries firmware. - */ - -#define VSID_MULTIPLIER ASM_CONST(200730139) /* 28-bit prime */ -#define VSID_BITS 36 -#define VSID_MODULUS ((1UL<= \ - * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ - * the bit clear, r3 already has the answer we want, if it \ - * doesn't, the answer is the low 36 bits of r3+1. So in all \ - * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\ - addi rx,rt,1; \ - srdi rx,rx,VSID_BITS; /* extract 2^36 bit */ \ - add rt,rt,rx - - -#ifndef __ASSEMBLY__ - -typedef unsigned long mm_context_id_t; - -typedef struct { - mm_context_id_t id; - u16 user_psize; /* page size index */ - u16 sllp; /* SLB entry page size encoding */ -#ifdef CONFIG_HUGETLB_PAGE - u16 low_htlb_areas, high_htlb_areas; +/* 32-bit. FIXME: split up the 32-bit MMU types, and revise for + * arch/powerpc */ +# include #endif - unsigned long vdso_base; -} mm_context_t; - - -static inline unsigned long vsid_scramble(unsigned long protovsid) -{ -#if 0 - /* The code below is equivalent to this function for arguments - * < 2^VSID_BITS, which is all this should ever be called - * with. However gcc is not clever enough to compute the - * modulus (2^n-1) without a second multiply. 
*/ - return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS); -#else /* 1 */ - unsigned long x; - - x = protovsid * VSID_MULTIPLIER; - x = (x >> VSID_BITS) + (x & VSID_MODULUS); - return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS; -#endif /* 1 */ -} - -/* This is only valid for addresses >= KERNELBASE */ -static inline unsigned long get_kernel_vsid(unsigned long ea) -{ - return vsid_scramble(ea >> SID_SHIFT); -} - -/* This is only valid for user addresses (which are below 2^41) */ -static inline unsigned long get_vsid(unsigned long context, unsigned long ea) -{ - return vsid_scramble((context << USER_ESID_BITS) - | (ea >> SID_SHIFT)); -} - -#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS) -#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea)) - -/* Physical address used by some IO functions */ -typedef unsigned long phys_addr_t; - - -#endif /* __ASSEMBLY */ -#endif /* CONFIG_PPC64 */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MMU_H_ */ -- cgit v1.2.3 From 8d8a0241eb019ce9648a77b55f9f76a834207cbb Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Thu, 26 Apr 2007 06:36:56 +1000 Subject: [POWERPC] Generic check_legacy_ioport check_legacy_ioport makes only sense on PREP, CHRP and pSeries. They may have an isa node with PS/2, parport, floppy and serial ports. Remove the check_legacy_ioport call from ppc_md, it's not needed anymore. Hardware capabilities come from the device-tree. Signed-off-by: Olaf Hering Signed-off-by: Paul Mackerras --- include/asm-powerpc/io.h | 7 ++++++- include/asm-powerpc/machdep.h | 3 --- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h index 301c9bb308b..350c9bdb31d 100644 --- a/include/asm-powerpc/io.h +++ b/include/asm-powerpc/io.h @@ -11,7 +11,12 @@ /* Check of existence of legacy devices */ extern int check_legacy_ioport(unsigned long base_port); -#define PNPBIOS_BASE 0xf000 /* only relevant for PReP */ +#define I8042_DATA_REG 0x60 +#define FDC_BASE 0x3f0 +/* only relevant for PReP */ +#define _PIDXR 0x279 +#define _PNPWRP 0xa79 +#define PNPBIOS_BASE 0xf000 #include #include diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 1b04e572354..b204926ce91 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h @@ -153,9 +153,6 @@ struct machdep_calls { */ long (*feature_call)(unsigned int feature, ...); - /* Check availability of legacy devices like i8042 */ - int (*check_legacy_ioport)(unsigned int baseport); - /* Get legacy PCI/IDE interrupt mapping */ int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel); -- cgit v1.2.3 From d169d140944a67edd6f88f78b65b3117059f4380 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sat, 28 Apr 2007 08:00:03 +1000 Subject: [POWERPC] Declare enable_kernel_spe in a header This patch puts enable_kernel_spe into along with enable_kernel_altivec etc. 
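For reference, a minimal sketch of how kernel code would use the now-declared function before issuing SPE instructions; the usage pattern mirrors enable_kernel_altivec, and preemption is disabled so the SPE register state stays with this CPU (illustrative only):

	static void example_use_spe(void)
	{
		preempt_disable();
		enable_kernel_spe();	/* make SPE usable in kernel context */
		/* ... SPE instructions / intrinsics on kernel data ... */
		preempt_enable();
	}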
Signed-off-by: Johannes Berg Signed-off-by: Paul Mackerras --- include/asm-powerpc/system.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/asm-powerpc') diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index f7b1227d645..d3e0906ff2b 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -131,6 +131,7 @@ extern void enable_kernel_altivec(void); extern void giveup_altivec(struct task_struct *); extern void load_up_altivec(struct task_struct *); extern int emulate_altivec(struct pt_regs *); +extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); extern int fix_alignment(struct pt_regs *); -- cgit v1.2.3