Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/Kconfig | 8
-rw-r--r-- arch/powerpc/configs/ps3_defconfig | 1
-rw-r--r-- arch/powerpc/kernel/Makefile | 1
-rw-r--r-- arch/powerpc/kernel/cputable.c | 5
-rw-r--r-- arch/powerpc/kernel/head_32.S | 7
-rw-r--r-- arch/powerpc/kernel/module_32.c | 23
-rw-r--r-- arch/powerpc/kernel/module_64.c | 23
-rw-r--r-- arch/powerpc/kernel/of_device.c | 4
-rw-r--r-- arch/powerpc/kernel/of_platform.c | 2
-rw-r--r-- arch/powerpc/kernel/pci_32.c | 143
-rw-r--r-- arch/powerpc/kernel/pci_64.c | 42
-rw-r--r-- arch/powerpc/kernel/ppc_ksyms.c | 2
-rw-r--r-- arch/powerpc/kernel/prom.c | 55
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 2
-rw-r--r-- arch/powerpc/kernel/rtas.c | 35
-rw-r--r-- arch/powerpc/kernel/sysfs.c | 16
-rw-r--r-- arch/powerpc/kernel/traps.c | 56
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r-- arch/powerpc/mm/numa.c | 65
-rw-r--r-- arch/powerpc/platforms/52xx/lite5200.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/cbe_thermal.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/pmu.c | 5
-rw-r--r-- arch/powerpc/platforms/cell/spufs/coredump.c | 2
-rw-r--r-- arch/powerpc/platforms/maple/pci.c | 2
-rw-r--r-- arch/powerpc/platforms/maple/setup.c | 12
-rw-r--r-- arch/powerpc/platforms/ps3/Kconfig | 11
-rw-r--r-- arch/powerpc/platforms/pseries/Makefile | 2
-rw-r--r-- arch/powerpc/platforms/pseries/eeh.c | 1
-rw-r--r-- arch/powerpc/platforms/pseries/eeh_driver.c | 13
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-cpu.c | 275
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 30
-rw-r--r-- arch/powerpc/platforms/pseries/smp.c | 200
-rw-r--r-- arch/powerpc/sysdev/Makefile | 3
-rw-r--r-- arch/powerpc/sysdev/dcr.S | 39
-rw-r--r-- arch/powerpc/sysdev/qe_lib/qe_ic.c | 40
-rw-r--r-- arch/powerpc/sysdev/rom.c | 1
-rw-r--r-- arch/powerpc/xmon/xmon.c | 10
37 files changed, 645 insertions(+), 501 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 56c3c4065eb..8699dadcd09 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -107,6 +107,11 @@ config AUDIT_ARCH
bool
default y
+config GENERIC_BUG
+ bool
+ default y
+ depends on BUG
+
config DEFAULT_UIMAGE
bool
help
@@ -478,6 +483,7 @@ config PPC_MAPLE
select PPC_UDBG_16550
select PPC_970_NAP
select PPC_NATIVE
+ select PPC_RTAS
default n
help
This option enables support for the Maple 970FX Evaluation Board.
@@ -714,7 +720,7 @@ config FORCE_MAX_ZONEORDER
config MATH_EMULATION
bool "Math emulation"
- depends on 4xx || 8xx || E200 || E500
+ depends on 4xx || 8xx || E200 || PPC_83xx || E500
---help---
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index f2d888e014a..70ed61337f5 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -157,6 +157,7 @@ CONFIG_SPU_BASE=y
CONFIG_PS3_HTAB_SIZE=20
CONFIG_PS3_DYNAMIC_DMA=y
CONFIG_PS3_USE_LPAR_ADDR=y
+CONFIG_PS3_VUART=y
#
# Kernel options
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4fe53d08ab8..d2ded19e406 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -77,6 +77,7 @@ endif
ifeq ($(CONFIG_PPC_ISERIES),y)
extra-y += lparmap.s
+$(obj)/head_64.o: $(obj)/lparmap.s
AFLAGS_head_64.o += -I$(obj)
endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9d1614c3ce6..b742013bb9d 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -833,7 +833,7 @@ static struct cpu_spec cpu_specs[] = {
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00840000,
.cpu_name = "e300c2",
- .cpu_features = CPU_FTRS_E300,
+ .cpu_features = CPU_FTRS_E300C2,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 32,
.dcache_bsize = 32,
@@ -1136,8 +1136,7 @@ static struct cpu_spec cpu_specs[] = {
.pvr_mask = 0xff000fff,
.pvr_value = 0x53000890,
.cpu_name = "440SPe Rev. A",
- .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
- CPU_FTR_USE_TB,
+ .cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index d88e182e40b..9417cf5b4b7 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -437,6 +437,13 @@ Alignment:
/* Floating-point unavailable */
. = 0x800
FPUnavailable:
+BEGIN_FTR_SECTION
+/*
+ * Certain Freescale cores don't have a FPU and treat fp instructions
+ * as a FP Unavailable exception. Redirect to illegal/emulation handling.
+ */
+ b ProgramCheck
+END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
EXCEPTION_PROLOG
bne load_up_fpu /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index e2c3c6a85f3..8339fd609de 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -23,6 +23,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cache.h>
+#include <linux/bug.h>
#include "setup.h"
@@ -290,23 +291,11 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
const Elf_Shdr *sect;
+ int err;
- me->arch.bug_table = NULL;
- me->arch.num_bugs = 0;
-
- /* Find the __bug_table section, if present */
- sect = find_section(hdr, sechdrs, "__bug_table");
- if (sect != NULL) {
- me->arch.bug_table = (void *) sect->sh_addr;
- me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
- }
-
- /*
- * Strictly speaking this should have a spinlock to protect against
- * traversals, but since we only traverse on BUG()s, a spinlock
- * could potentially lead to deadlock and thus be counter-productive.
- */
- list_add(&me->arch.bug_list, &module_bug_list);
+ err = module_bug_finalize(hdr, sechdrs, me);
+ if (err) /* never true, currently */
+ return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -320,7 +309,7 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
- list_del(&mod->arch.bug_list);
+ module_bug_cleanup(mod);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 8dd1f0aae5d..75c7c4f1928 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/bug.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
@@ -439,23 +440,11 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
+ int err;
- me->arch.bug_table = NULL;
- me->arch.num_bugs = 0;
-
- /* Find the __bug_table section, if present */
- sect = find_section(hdr, sechdrs, "__bug_table");
- if (sect != NULL) {
- me->arch.bug_table = (void *) sect->sh_addr;
- me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
- }
-
- /*
- * Strictly speaking this should have a spinlock to protect against
- * traversals, but since we only traverse on BUG()s, a spinlock
- * could potentially lead to deadlock and thus be counter-productive.
- */
- list_add(&me->arch.bug_list, &module_bug_list);
+ err = module_bug_finalize(hdr, sechdrs, me);
+ if (err)
+ return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -475,7 +464,7 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
- list_del(&mod->arch.bug_list);
+ module_bug_cleanup(mod);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 8a06724e029..e921514e655 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -109,9 +109,7 @@ int of_device_register(struct of_device *ofdev)
if (rc)
return rc;
- device_create_file(&ofdev->dev, &dev_attr_devspec);
-
- return 0;
+ return device_create_file(&ofdev->dev, &dev_attr_devspec);
}
void of_device_unregister(struct of_device *ofdev)
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b3189d0161b..3002ea3a61a 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -169,7 +169,7 @@ static void of_platform_make_bus_id(struct of_device *dev)
char *name = dev->dev.bus_id;
const u32 *reg;
u64 addr;
- long magic;
+ int magic;
/*
* If it's a DCR based device, use 'd' for native DCRs
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 2f54cd81dea..8336deafc62 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -736,25 +736,51 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
return NULL;
}
-static int
-scan_OF_pci_childs_iterator(struct device_node* node, void* data)
+static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
+ unsigned int devfn)
{
- const unsigned int *reg;
- u8* fdata = (u8*)data;
-
- reg = get_property(node, "reg", NULL);
- if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
- && ((reg[0] >> 16) & 0xff) == fdata[0])
- return 1;
- return 0;
+ struct device_node *np = NULL;
+ const u32 *reg;
+ unsigned int psize;
+
+ while ((np = of_get_next_child(parent, np)) != NULL) {
+ reg = get_property(np, "reg", &psize);
+ if (reg == NULL || psize < 4)
+ continue;
+ if (((reg[0] >> 8) & 0xff) == devfn)
+ return np;
+ }
+ return NULL;
}
-static struct device_node*
-scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
+
+static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
{
- u8 filter_data[2] = {bus, dev_fn};
+ struct device_node *parent, *np;
+
+ /* Are we a root bus ? */
+ if (bus->self == NULL || bus->parent == NULL) {
+ struct pci_controller *hose = pci_bus_to_hose(bus->number);
+ if (hose == NULL)
+ return NULL;
+ return of_node_get(hose->arch_data);
+ }
+
+ /* not a root bus, we need to get our parent */
+ parent = scan_OF_for_pci_bus(bus->parent);
+ if (parent == NULL)
+ return NULL;
+
+ /* now iterate for children for a match */
+ np = scan_OF_for_pci_dev(parent, bus->self->devfn);
+ of_node_put(parent);
- return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
+ /* sanity check */
+ if (strcmp(np->type, "pci") != 0)
+ printk(KERN_WARNING "pci: wrong type \"%s\" for bridge %s\n",
+ np->type, np->full_name);
+
+ return np;
}
/*
@@ -763,43 +789,25 @@ scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
struct device_node *
pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
- struct pci_controller *hose;
- struct device_node *node;
- int busnr;
+ struct device_node *parent, *np;
if (!have_of)
return NULL;
-
- /* Lookup the hose */
- busnr = bus->number;
- hose = pci_bus_to_hose(busnr);
- if (!hose)
- return NULL;
- /* Check it has an OF node associated */
- node = (struct device_node *) hose->arch_data;
- if (!node)
+ DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
+ parent = scan_OF_for_pci_bus(bus);
+ if (parent == NULL)
return NULL;
-
- /* Fixup bus number according to what OF think it is. */
-#ifdef CONFIG_PPC_PMAC
- /* The G5 need a special case here. Basically, we don't remap all
- * busses on it so we don't create the pci-OF-map. However, we do
- * remap the AGP bus and so have to deal with it. A future better
- * fix has to be done by making the remapping per-host and always
- * filling the pci_to_OF map. --BenH
+ DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
+ np = scan_OF_for_pci_dev(parent, devfn);
+ of_node_put(parent);
+ DBG(" result is %s\n", np ? np->full_name : "<NULL>");
+
+ /* XXX most callers don't release the returned node
+ * mostly because ppc64 doesn't increase the refcount,
+ * we need to fix that.
*/
- if (machine_is(powermac) && busnr >= 0xf0)
- busnr -= 0xf0;
- else
-#endif
- if (pci_to_OF_bus_map)
- busnr = pci_to_OF_bus_map[busnr];
- if (busnr == 0xff)
- return NULL;
-
- /* Now, lookup childs of the hose */
- return scan_OF_childs_for_device(node->child, busnr, devfn);
+ return np;
}
EXPORT_SYMBOL(pci_busdev_to_OF_node);
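
The matching in scan_OF_for_pci_dev() relies on the standard PCI OpenFirmware "reg" binding, where bits 8-15 of the first address cell (phys.hi) carry the devfn: device number in bits 15:11, function in bits 10:8. A minimal standalone sketch of that encoding; the macros mirror the kernel's PCI_DEVFN/PCI_SLOT/PCI_FUNC helpers but everything here (including the example cell value) is illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the kernel's PCI_DEVFN/PCI_SLOT/PCI_FUNC helpers. */
#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)        ((devfn) & 0x07)

/* Extract the devfn from the first "reg" cell of a PCI node
 * (phys.hi layout: bbbbbbbb dddddfff 00000000 00000000). */
static unsigned int of_reg_to_devfn(uint32_t phys_hi)
{
	return (phys_hi >> 8) & 0xff;
}

int main(void)
{
	uint32_t phys_hi = 0x0000b000;	/* invented: device 22, function 0 */
	unsigned int devfn = of_reg_to_devfn(phys_hi);

	printf("devfn=0x%02x slot=%u func=%u\n",
	       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}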
@@ -1544,7 +1552,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
- unsigned long *offset,
+ resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
@@ -1556,7 +1564,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
+#endif
res_bit = IORESOURCE_MEM;
} else {
io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
@@ -1624,9 +1634,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
- (unsigned long long)rp->start, prot);
-
return __pgprot(prot);
}
@@ -1695,7 +1702,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
@@ -1808,22 +1815,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
- unsigned long offset = 0;
+ resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
- offset = (void __iomem *)_IO_BASE - hose->io_base_virt
- + hose->io_base_phys;
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+
+ /* We pass a fully fixed up address to userland for MMIO instead of
+ * a BAR value because X is lame and expects to be able to use that
+ * to pass to /dev/mem !
+ *
+ * That means that we'll have potentially 64 bits values where some
+ * userland apps only expect 32 (like X itself since it thinks only
+ * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+ * 32 bits CHRPs :-(
+ *
+ * Hopefully, the sysfs interface is immune to that gunk. Once X
+ * has been fixed (and the fix spread enough), we can re-enable the
+ * 2 lines below and pass down a BAR value to userland. In that case
+ * we'll also have to re-enable the matching code in
+ * __pci_mmap_make_offset().
+ *
+ * BenH.
+ */
+#if 0
+ else if (rsrc->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+#endif
- *start = rsrc->start + offset;
- *end = rsrc->end + offset;
+ *start = rsrc->start - offset;
+ *end = rsrc->end - offset;
}
-void __init
-pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
- int flags, char *name)
+void __init pci_init_resource(struct resource *res, resource_size_t start,
+ resource_size_t end, int flags, char *name)
{
res->start = start;
res->end = end;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 6fa9a0a5c8d..a6b7692c726 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -682,7 +682,7 @@ int pci_proc_domain(struct pci_bus *bus)
* Returns negative error code on failure, zero on success.
*/
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
- unsigned long *offset,
+ resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -694,7 +694,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
+#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
+#endif
res_bit = IORESOURCE_MEM;
} else {
io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
@@ -762,9 +764,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
- printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
- prot);
-
return __pgprot(prot);
}
@@ -832,7 +831,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
@@ -1333,20 +1332,41 @@ EXPORT_SYMBOL(pci_read_irq_line);
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
- u64 *start, u64 *end)
+ resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
- unsigned long offset = 0;
+ resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
- offset = pci_io_base - (unsigned long)hose->io_base_virt +
- hose->io_base_phys;
+ offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+ /* We pass a fully fixed up address to userland for MMIO instead of
+ * a BAR value because X is lame and expects to be able to use that
+ * to pass to /dev/mem !
+ *
+ * That means that we'll have potentially 64 bits values where some
+ * userland apps only expect 32 (like X itself since it thinks only
+ * Sparc has 64 bits MMIO) but if we don't do that, we break it on
+ * 32 bits CHRPs :-(
+ *
+ * Hopefully, the sysfs interface is immune to that gunk. Once X
+ * has been fixed (and the fix spread enough), we can re-enable the
+ * 2 lines below and pass down a BAR value to userland. In that case
+ * we'll also have to re-enable the matching code in
+ * __pci_mmap_make_offset().
+ *
+ * BenH.
+ */
+#if 0
+ else if (rsrc->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+#endif
- *start = rsrc->start + offset;
- *end = rsrc->end + offset;
+ *start = rsrc->start - offset;
+ *end = rsrc->end - offset;
}
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
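
The sign of the I/O fix-up flips in both pci_resource_to_user() hunks: the old code added an offset built from io_base_phys, the new code subtracts (io_base_virt - pci_io_base), so userland now sees a bus-relative value rather than a CPU physical address (that reading is mine, not a statement from the patch). A toy calculation with invented addresses, just evaluating the two expressions from the hunks:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only; not from any real machine. */
	unsigned long pci_io_base  = 0xd0000000UL; /* kernel virtual base of PIO space    */
	unsigned long io_base_virt = 0xd0100000UL; /* this hose's slice of that space     */
	unsigned long io_base_phys = 0xf8000000UL; /* CPU physical address of hose PIO    */
	unsigned long rsrc_start   = 0x00100100UL; /* hose offset (0x100000) + port 0x100 */

	/* Old expression, reordered so the intermediate stays positive. */
	unsigned long old_offset = io_base_phys + pci_io_base - io_base_virt;
	/* New expression from the hunk. */
	unsigned long new_offset = io_base_virt - pci_io_base;

	printf("old: start + offset = 0x%lx\n", rsrc_start + old_offset);
	printf("new: start - offset = 0x%lx\n", rsrc_start - new_offset);
	return 0;
}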
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 9179f0739ea..95776b6af4e 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU_32 */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c18dbe77fdc..1fc732a552d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -804,6 +804,56 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
return of_read_ulong(p, s);
}
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Interpret the ibm,dynamic-memory property in the
+ * /ibm,dynamic-reconfiguration-memory node.
+ * This contains a list of memory blocks along with NUMA affinity
+ * information.
+ */
+static int __init early_init_dt_scan_drconf_memory(unsigned long node)
+{
+ cell_t *dm, *ls;
+ unsigned long l, n;
+ unsigned long base, size, lmb_size, flags;
+
+ ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+ if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
+ return 0;
+ lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
+
+ dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
+ if (dm == NULL || l < sizeof(cell_t))
+ return 0;
+
+ n = *dm++; /* number of entries */
+ if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
+ return 0;
+
+ for (; n != 0; --n) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &dm);
+ flags = dm[3];
+ /* skip DRC index, pad, assoc. list index, flags */
+ dm += 4;
+ /* skip this block if the reserved bit is set in flags (0x80)
+ or if the block is not assigned to this partition (0x8) */
+ if ((flags & 0x80) || !(flags & 0x8))
+ continue;
+ size = lmb_size;
+ if (iommu_is_off) {
+ if (base >= 0x80000000ul)
+ continue;
+ if ((base + size) > 0x80000000ul)
+ size = 0x80000000ul - base;
+ }
+ lmb_add(base, size);
+ }
+ lmb_dump_all();
+ return 0;
+}
+#else
+#define early_init_dt_scan_drconf_memory(node) 0
+#endif /* CONFIG_PPC_PSERIES */
static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data)
@@ -812,6 +862,11 @@ static int __init early_init_dt_scan_memory(unsigned long node,
cell_t *reg, *endp;
unsigned long l;
+ /* Look for the ibm,dynamic-reconfiguration-memory node */
+ if (depth == 1 &&
+ strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
+ return early_init_dt_scan_drconf_memory(node);
+
/* We are scanning "memory" nodes only */
if (type == NULL) {
/*
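
The bounds check in early_init_dt_scan_drconf_memory() encodes the layout of ibm,dynamic-memory: one count cell, then per entry <addr-cells> cells of base address followed by four more cells (DRC index, reserved, associativity-list index, flags). A small user-space sketch that walks such a buffer the same way; the cell values are invented, ADDR_CELLS = 2 is only a typical value for 64-bit pseries, and flat-tree endianness handling is omitted:

#include <stdio.h>
#include <stdint.h>

#define ADDR_CELLS 2	/* stands in for dt_root_addr_cells */

static uint64_t next_cells(int n, const uint32_t **p)
{
	uint64_t v = 0;
	while (n--)
		v = (v << 32) | *(*p)++;
	return v;
}

int main(void)
{
	/* Fake property: 2 entries of <base(2 cells), drc, pad, assoc, flags>. */
	static const uint32_t prop[] = {
		2,				/* number of entries */
		0x0, 0x00000000, 1, 0, 0, 0x8,	/* base 0x0, assigned to us   */
		0x0, 0x10000000, 2, 0, 0, 0x80,	/* base 0x10000000, reserved  */
	};
	const uint32_t *dm = prop;
	uint32_t n = *dm++;

	while (n--) {
		uint64_t base = next_cells(ADDR_CELLS, &dm);
		uint32_t flags = dm[3];
		dm += 4;			/* skip DRC index, pad, assoc index, flags */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;		/* reserved, or not assigned to this partition */
		printf("usable LMB at 0x%llx\n", (unsigned long long)base);
	}
	return 0;
}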
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 46cf32670dd..520ef42f642 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -679,7 +679,7 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 5: PAPR/OF options */
3 - 2, /* length */
0, /* don't ignore, don't halt */
- OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
+ OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY,
};
/* Old method - ELF header with PT_NOTE sections */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 387ed0d9ad6..76b5d7ebdcc 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -303,6 +303,12 @@ int rtas_token(const char *service)
}
EXPORT_SYMBOL(rtas_token);
+int rtas_service_present(const char *service)
+{
+ return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
+}
+EXPORT_SYMBOL(rtas_service_present);
+
#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
* Return the firmware-specified size of the error log buffer
@@ -810,32 +816,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
- /* The token is initialized for real in setup_system() */
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
-
-void rtas_stop_self(void)
-{
- struct rtas_args *rtas_args = &rtas_stop_self_args;
-
- local_irq_disable();
-
- BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
-
- printk("cpu %u (hwid %u) Ready to die...\n",
- smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(rtas_args));
-
- panic("Alas, I survived.\n");
-}
-#endif
-
/*
* Call early during boot, before mem init or bootmem, to retrieve the RTAS
* informations from the device-tree and allocate the RMO buffer for userland
@@ -880,9 +860,6 @@ void __init rtas_initialize(void)
#endif
rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
-#ifdef CONFIG_HOTPLUG_CPU
- rtas_stop_self_args.token = rtas_token("stop-self");
-#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 63ed265b7f0..400ab2b946e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -181,6 +181,8 @@ SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);
+SYSFS_PMCSETUP(spurr, SPRN_SPURR);
+SYSFS_PMCSETUP(dscr, SPRN_DSCR);
static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
@@ -194,6 +196,8 @@ static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
+static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
+static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
static void register_cpu_online(unsigned int cpu)
{
@@ -231,6 +235,12 @@ static void register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PURR))
sysdev_create_file(s, &attr_purr);
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ sysdev_create_file(s, &attr_spurr);
+
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ sysdev_create_file(s, &attr_dscr);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -272,6 +282,12 @@ static void unregister_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PURR))
sysdev_remove_file(s, &attr_purr);
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ sysdev_remove_file(s, &attr_spurr);
+
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ sysdev_remove_file(s, &attr_dscr);
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0d4e203fa7a..535f5066564 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
+#include <linux/bug.h>
#include <asm/kdebug.h>
#include <asm/pgtable.h>
@@ -727,54 +728,9 @@ static int emulate_instruction(struct pt_regs *regs)
return -EINVAL;
}
-/*
- * Look through the list of trap instructions that are used for BUG(),
- * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
- * that the exception was caused by a trap instruction of some kind.
- * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
- * otherwise.
- */
-extern struct bug_entry __start___bug_table[], __stop___bug_table[];
-
-#ifndef CONFIG_MODULES
-#define module_find_bug(x) NULL
-#endif
-
-struct bug_entry *find_bug(unsigned long bugaddr)
+int is_valid_bugaddr(unsigned long addr)
{
- struct bug_entry *bug;
-
- for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
- if (bugaddr == bug->bug_addr)
- return bug;
- return module_find_bug(bugaddr);
-}
-
-static int check_bug_trap(struct pt_regs *regs)
-{
- struct bug_entry *bug;
- unsigned long addr;
-
- if (regs->msr & MSR_PR)
- return 0; /* not in kernel */
- addr = regs->nip; /* address of trap instruction */
- if (addr < PAGE_OFFSET)
- return 0;
- bug = find_bug(regs->nip);
- if (bug == NULL)
- return 0;
- if (bug->line & BUG_WARNING_TRAP) {
- /* this is a WARN_ON rather than BUG/BUG_ON */
- printk(KERN_ERR "Badness in %s at %s:%ld\n",
- bug->function, bug->file,
- bug->line & ~BUG_WARNING_TRAP);
- dump_stack();
- return 1;
- }
- printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
- bug->function, bug->file, bug->line);
-
- return 0;
+ return is_kernel_addr(addr);
}
void __kprobes program_check_exception(struct pt_regs *regs)
@@ -782,6 +738,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
unsigned int reason = get_reason(regs);
extern int do_mathemu(struct pt_regs *regs);
+ /* We can now get here via a FP Unavailable exception if the core
+ * has no FPU, in that case no reason flags will be set */
#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
@@ -808,7 +766,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
if (debugger_bpt(regs))
return;
- if (check_bug_trap(regs)) {
+
+ if (!(regs->msr & MSR_PR) && /* not user-mode */
+ report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
return;
}
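
With the removal of check_bug_trap(), the handler only filters out user-mode traps and asks the generic code whether the trap address belongs to a WARN-type entry, skipping the trap instruction in that case. A minimal sketch of that shape using stand-in types and a hand-rolled lookup (not the real report_bug()/bug_entry from lib/bug.c):

#include <stdio.h>
#include <stdbool.h>

enum bug_type { BUG_NONE, BUG_WARN, BUG_FATAL };

struct bug_rec {
	unsigned long addr;	/* address of the trap instruction */
	const char   *file;
	unsigned int  line;
	bool          warn_only;	/* WARN_ON() rather than BUG() */
};

static const struct bug_rec table[] = {
	{ 0x1000, "foo.c", 10, true  },
	{ 0x2000, "bar.c", 42, false },
};

/* Roughly the job report_bug() does: find the entry and report it. */
static enum bug_type lookup_bug(unsigned long addr)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].addr != addr)
			continue;
		if (table[i].warn_only) {
			printf("Badness at %s:%u\n", table[i].file, table[i].line);
			return BUG_WARN;
		}
		printf("kernel BUG at %s:%u!\n", table[i].file, table[i].line);
		return BUG_FATAL;
	}
	return BUG_NONE;
}

int main(void)
{
	unsigned long nip = 0x1000;	/* pretend trap address */
	bool user_mode = false;		/* regs->msr & MSR_PR in the real handler */

	if (!user_mode && lookup_bug(nip) == BUG_WARN) {
		nip += 4;		/* WARN: step over the trap and continue */
		printf("continuing at 0x%lx\n", nip);
	}
	return 0;
}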
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 04b98671a06..04b8e71bf5b 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -62,11 +62,7 @@ SECTIONS
__stop___ex_table = .;
}
- __bug_table : {
- __start___bug_table = .;
- *(__bug_table)
- __stop___bug_table = .;
- }
+ BUG_TABLE
/*
* Init sections discarded at runtime
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9da01dc8cfd..262790910ff 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -295,6 +295,63 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
return lmb_end_of_DRAM() - start;
}
+/*
+ * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
+ * node. This assumes n_mem_{addr,size}_cells have been set.
+ */
+static void __init parse_drconf_memory(struct device_node *memory)
+{
+ const unsigned int *lm, *dm, *aa;
+ unsigned int ls, ld, la;
+ unsigned int n, aam, aalen;
+ unsigned long lmb_size, size;
+ int nid, default_nid = 0;
+ unsigned int start, ai, flags;
+
+ lm = get_property(memory, "ibm,lmb-size", &ls);
+ dm = get_property(memory, "ibm,dynamic-memory", &ld);
+ aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
+ if (!lm || !dm || !aa ||
+ ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
+ la < 2 * sizeof(unsigned int))
+ return;
+
+ lmb_size = read_n_cells(n_mem_size_cells, &lm);
+ n = *dm++; /* number of LMBs */
+ aam = *aa++; /* number of associativity lists */
+ aalen = *aa++; /* length of each associativity list */
+ if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
+ la < (aam * aalen + 2) * sizeof(unsigned int))
+ return;
+
+ for (; n != 0; --n) {
+ start = read_n_cells(n_mem_addr_cells, &dm);
+ ai = dm[2];
+ flags = dm[3];
+ dm += 4;
+ /* 0x80 == reserved, 0x8 = assigned to us */
+ if ((flags & 0x80) || !(flags & 0x8))
+ continue;
+ nid = default_nid;
+ /* flags & 0x40 means associativity index is invalid */
+ if (min_common_depth > 0 && min_common_depth <= aalen &&
+ (flags & 0x40) == 0 && ai < aam) {
+ /* this is like of_node_to_nid_single */
+ nid = aa[ai * aalen + min_common_depth - 1];
+ if (nid == 0xffff || nid >= MAX_NUMNODES)
+ nid = default_nid;
+ }
+ node_set_online(nid);
+
+ size = numa_enforce_memory_limit(start, lmb_size);
+ if (!size)
+ continue;
+
+ add_active_range(nid, start >> PAGE_SHIFT,
+ (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ }
+}
+
static int __init parse_numa_properties(void)
{
struct device_node *cpu = NULL;
@@ -385,6 +442,14 @@ new_range:
goto new_range;
}
+ /*
+ * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+ * property in the ibm,dynamic-reconfiguration-memory node.
+ */
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory)
+ parse_drconf_memory(memory);
+
return 0;
}
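
The node id in parse_drconf_memory() comes out of ibm,associativity-lookup-arrays: two header cells (number of lists, entries per list) followed by the lists themselves, with entry min_common_depth-1 of list ai giving the node. A standalone sketch of just that indexing; the array contents are invented and min_common_depth is hard-coded where the kernel would derive it from ibm,associativity-reference-points:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented property: 2 associativity lists of 4 entries each. */
	static const uint32_t prop[] = {
		2, 4,			/* number of lists, entries per list */
		0, 0, 0, 0,		/* list 0 -> node 0 */
		0, 0, 1, 1,		/* list 1 -> node 1 */
	};
	const uint32_t *aa = prop;
	uint32_t aam   = *aa++;		/* number of associativity lists */
	uint32_t aalen = *aa++;		/* length of each list */
	uint32_t min_common_depth = 3;	/* assumed; normally from the device tree */

	uint32_t ai  = 1;		/* per-LMB index from ibm,dynamic-memory */
	uint32_t nid = 0;		/* default node */

	if (min_common_depth > 0 && min_common_depth <= aalen && ai < aam)
		nid = aa[ai * aalen + min_common_depth - 1];

	printf("LMB with assoc index %u maps to node %u\n",
	       (unsigned int)ai, (unsigned int)nid);
	return 0;
}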
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index a375c15b431..eaff71e74fb 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -40,8 +40,6 @@
#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
#include <asm/of_platform.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 616a0a3fd0e..70e0d968d30 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -115,6 +115,7 @@ static struct sysdev_attribute attr_spu_temperature = {
static struct attribute *spu_attributes[] = {
&attr_spu_temperature.attr,
+ NULL,
};
static struct attribute_group spu_attribute_group = {
@@ -135,6 +136,7 @@ static struct sysdev_attribute attr_ppe_temperature1 = {
static struct attribute *ppe_attributes[] = {
&attr_ppe_temperature0.attr,
&attr_ppe_temperature1.attr,
+ NULL,
};
static struct attribute_group ppe_attribute_group = {
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 99c612025e8..d04ae1671e6 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -382,11 +382,14 @@ static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int __init cbe_init_pm_irq(void)
+static int __init cbe_init_pm_irq(void)
{
unsigned int irq;
int rc, node;
+ if (!machine_is(cell))
+ return 0;
+
for_each_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
(node << IIC_IRQ_NODE_SHIFT));
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 26945c491f6..725e1956115 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -147,7 +147,7 @@ static int spufs_arch_notes_size(void)
struct fdtable *fdt = files_fdtable(current->files);
int size = 0, fd;
- for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
+ for (fd = 0; fd < fdt->max_fds; fd++) {
if (FD_ISSET(fd, fdt->open_fds)) {
struct file *file = fcheck(fd);
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 3a32deda765..3f6a69f6719 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -562,7 +562,7 @@ void __init maple_pci_init(void)
for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
if (np->name == NULL)
continue;
- if (strcmp(np->name, "pci") == 0) {
+ if (!strcmp(np->name, "pci") || !strcmp(np->name, "pcie")) {
if (add_bridge(np) == 0)
of_node_get(np);
}
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 094989d50ba..f12d5c69e74 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -60,6 +60,7 @@
#include <asm/of_device.h>
#include <asm/lmb.h>
#include <asm/mpic.h>
+#include <asm/rtas.h>
#include <asm/udbg.h>
#include "maple.h"
@@ -166,6 +167,16 @@ struct smp_ops_t maple_smp_ops = {
};
#endif /* CONFIG_SMP */
+static void __init maple_use_rtas_reboot_and_halt_if_present(void)
+{
+ if (rtas_service_present("system-reboot") &&
+ rtas_service_present("power-off")) {
+ ppc_md.restart = rtas_restart;
+ ppc_md.power_off = rtas_power_off;
+ ppc_md.halt = rtas_halt;
+ }
+}
+
void __init maple_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
@@ -181,6 +192,7 @@ void __init maple_setup_arch(void)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
+ maple_use_rtas_reboot_and_halt_if_present();
printk(KERN_DEBUG "Using native/NAP idle loop\n");
}
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 451bfcd5502..de52ec4e9e5 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -40,4 +40,15 @@ config PS3_USE_LPAR_ADDR
If you have any doubt, choose the default y.
+config PS3_VUART
+ depends on PPC_PS3
+ bool "PS3 Virtual UART support"
+ default y
+ help
+ Include support for the PS3 Virtual UART.
+
+ This support is required for several system services
+ including the System Manager and AV Settings. In
+ general, all users will say Y.
+
endmenu
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 997243a91be..69590fbf83d 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
+
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 3c2d63ebf78..da6e5362e7c 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -337,6 +337,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
pdn->eeh_check_count);
dump_stack();
+ msleep(5000);
/* re-read the slot reset state */
if (read_slot_reset_state(pdn, rets) != 0)
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index c2bc9904f1c..cbd6b0711ab 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -170,14 +170,19 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
static void eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
+ struct device_node *dn = pci_device_to_OF_node(dev);
dev->error_state = pci_channel_io_normal;
if (!driver)
return;
- if (!driver->err_handler)
- return;
- if (!driver->err_handler->resume)
+
+ if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
+ PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
+ enable_irq(dev->irq);
+ }
+ if (!driver->err_handler ||
+ !driver->err_handler->resume)
return;
driver->err_handler->resume(dev);
@@ -407,6 +412,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
if (rc)
result = PCI_ERS_RESULT_NEED_RESET;
+ else
+ result = PCI_ERS_RESULT_RECOVERED;
}
/* If any device has a hard failure, then shut off everything. */
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
new file mode 100644
index 00000000000..f460b9cbfd4
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -0,0 +1,275 @@
+/*
+ * pseries CPU Hotplug infrastructure.
+ *
+ * Split out from arch/powerpc/platforms/pseries/setup.c
+ * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
+ *
+ * Peter Bergner, IBM March 2001.
+ * Copyright (C) 2001 IBM.
+ * Dave Engebretsen, Peter Bergner, and
+ * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
+ * Plus various changes from other IBM teams...
+ *
+ * Copyright (C) 2006 Michael Ellerman, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/system.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/firmware.h>
+#include <asm/machdep.h>
+#include <asm/vdso_datapage.h>
+#include <asm/pSeries_reconfig.h>
+#include "xics.h"
+
+/* This version can't take the spinlock, because it never returns */
+static struct rtas_args rtas_stop_self_args = {
+ .token = RTAS_UNKNOWN_SERVICE,
+ .nargs = 0,
+ .nret = 1,
+ .rets = &rtas_stop_self_args.args[0],
+};
+
+static void rtas_stop_self(void)
+{
+ struct rtas_args *args = &rtas_stop_self_args;
+
+ local_irq_disable();
+
+ BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+
+ printk("cpu %u (hwid %u) Ready to die...\n",
+ smp_processor_id(), hard_smp_processor_id());
+ enter_rtas(__pa(args));
+
+ panic("Alas, I survived.\n");
+}
+
+static void pseries_mach_cpu_die(void)
+{
+ local_irq_disable();
+ idle_task_exit();
+ xics_teardown_cpu(0);
+ rtas_stop_self();
+ /* Should never get here... */
+ BUG();
+ for(;;);
+}
+
+static int qcss_tok; /* query-cpu-stopped-state token */
+
+/* Get state of physical CPU.
+ * Return codes:
+ * 0 - The processor is in the RTAS stopped state
+ * 1 - stop-self is in progress
+ * 2 - The processor is not in the RTAS stopped state
+ * -1 - Hardware Error
+ * -2 - Hardware Busy, Try again later.
+ */
+static int query_cpu_stopped(unsigned int pcpu)
+{
+ int cpu_status, status;
+
+ status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+ if (status != 0) {
+ printk(KERN_ERR
+ "RTAS query-cpu-stopped-state failed: %i\n", status);
+ return status;
+ }
+
+ return cpu_status;
+}
+
+static int pseries_cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+
+ cpu_clear(cpu, cpu_online_map);
+ vdso_data->processorCount--;
+
+ /*fix boot_cpuid here*/
+ if (cpu == boot_cpuid)
+ boot_cpuid = any_online_cpu(cpu_online_map);
+
+ /* FIXME: abstract this to not be platform specific later on */
+ xics_migrate_irqs_away();
+ return 0;
+}
+
+static void pseries_cpu_die(unsigned int cpu)
+{
+ int tries;
+ int cpu_status;
+ unsigned int pcpu = get_hard_smp_processor_id(cpu);
+
+ for (tries = 0; tries < 25; tries++) {
+ cpu_status = query_cpu_stopped(pcpu);
+ if (cpu_status == 0 || cpu_status == -1)
+ break;
+ msleep(200);
+ }
+ if (cpu_status != 0) {
+ printk("Querying DEAD? cpu %i (%i) shows %i\n",
+ cpu, pcpu, cpu_status);
+ }
+
+ /* Isolation and deallocation are definitely done by
+ * drslot_chrp_cpu. If they were not they would be
+ * done here. Change isolate state to Isolate and
+ * change allocation-state to Unusable.
+ */
+ paca[cpu].cpu_start = 0;
+}
+
+/*
+ * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
+ * here is that a cpu device node may represent up to two logical cpus
+ * in the SMT case. We must honor the assumption in other code that
+ * the logical ids for sibling SMT threads x and y are adjacent, such
+ * that x^1 == y and y^1 == x.
+ */
+static int pseries_add_processor(struct device_node *np)
+{
+ unsigned int cpu;
+ cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+ int err = -ENOSPC, len, nthreads, i;
+ const u32 *intserv;
+
+ intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return 0;
+
+ nthreads = len / sizeof(u32);
+ for (i = 0; i < nthreads; i++)
+ cpu_set(i, tmp);
+
+ lock_cpu_hotplug();
+
+ BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+
+ /* Get a bitmap of unoccupied slots. */
+ cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
+ if (cpus_empty(candidate_map)) {
+ /* If we get here, it most likely means that NR_CPUS is
+ * less than the partition's max processors setting.
+ */
+ printk(KERN_ERR "Cannot add cpu %s; this system configuration"
+ " supports %d logical cpus.\n", np->full_name,
+ cpus_weight(cpu_possible_map));
+ goto out_unlock;
+ }
+
+ while (!cpus_empty(tmp))
+ if (cpus_subset(tmp, candidate_map))
+ /* Found a range where we can insert the new cpu(s) */
+ break;
+ else
+ cpus_shift_left(tmp, tmp, nthreads);
+
+ if (cpus_empty(tmp)) {
+ printk(KERN_ERR "Unable to find space in cpu_present_map for"
+ " processor %s with %d thread(s)\n", np->name,
+ nthreads);
+ goto out_unlock;
+ }
+
+ for_each_cpu_mask(cpu, tmp) {
+ BUG_ON(cpu_isset(cpu, cpu_present_map));
+ cpu_set(cpu, cpu_present_map);
+ set_hard_smp_processor_id(cpu, *intserv++);
+ }
+ err = 0;
+out_unlock:
+ unlock_cpu_hotplug();
+ return err;
+}
+
+/*
+ * Update the present map for a cpu node which is going away, and set
+ * the hard id in the paca(s) to -1 to be consistent with boot time
+ * convention for non-present cpus.
+ */
+static void pseries_remove_processor(struct device_node *np)
+{
+ unsigned int cpu;
+ int len, nthreads, i;
+ const u32 *intserv;
+
+ intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return;
+
+ nthreads = len / sizeof(u32);
+
+ lock_cpu_hotplug();
+ for (i = 0; i < nthreads; i++) {
+ for_each_present_cpu(cpu) {
+ if (get_hard_smp_processor_id(cpu) != intserv[i])
+ continue;
+ BUG_ON(cpu_online(cpu));
+ cpu_clear(cpu, cpu_present_map);
+ set_hard_smp_processor_id(cpu, -1);
+ break;
+ }
+ if (cpu == NR_CPUS)
+ printk(KERN_WARNING "Could not find cpu to remove "
+ "with physical id 0x%x\n", intserv[i]);
+ }
+ unlock_cpu_hotplug();
+}
+
+static int pseries_smp_notifier(struct notifier_block *nb,
+ unsigned long action, void *node)
+{
+ int err = NOTIFY_OK;
+
+ switch (action) {
+ case PSERIES_RECONFIG_ADD:
+ if (pseries_add_processor(node))
+ err = NOTIFY_BAD;
+ break;
+ case PSERIES_RECONFIG_REMOVE:
+ pseries_remove_processor(node);
+ break;
+ default:
+ err = NOTIFY_DONE;
+ break;
+ }
+ return err;
+}
+
+static struct notifier_block pseries_smp_nb = {
+ .notifier_call = pseries_smp_notifier,
+};
+
+static int __init pseries_cpu_hotplug_init(void)
+{
+ rtas_stop_self_args.token = rtas_token("stop-self");
+ qcss_tok = rtas_token("query-cpu-stopped-state");
+
+ if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+ qcss_tok == RTAS_UNKNOWN_SERVICE) {
+ printk(KERN_INFO "CPU Hotplug not supported by firmware "
+ "- disabling.\n");
+ return 0;
+ }
+
+ ppc_md.cpu_die = pseries_mach_cpu_die;
+ smp_ops->cpu_disable = pseries_cpu_disable;
+ smp_ops->cpu_die = pseries_cpu_die;
+
+ /* Processors can be added/removed only on LPAR */
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ pSeries_reconfig_notifier_register(&pseries_smp_nb);
+
+ return 0;
+}
+arch_initcall(pseries_cpu_hotplug_init);
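
The comment above pseries_add_processor() spells out the invariant the slot search preserves: logical ids of sibling SMT threads differ only in the low bit, so x ^ 1 gives the sibling. A two-line illustration, assuming two threads per core:

#include <stdio.h>

int main(void)
{
	/* With 2 threads per core and the adjacency invariant,
	 * the SMT sibling of logical cpu x is simply x ^ 1. */
	for (unsigned int cpu = 4; cpu < 8; cpu++)
		printf("cpu %u <-> sibling %u\n", cpu, cpu ^ 1);
	return 0;
}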
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0dc2548ca9b..042ecae107a 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -347,21 +347,6 @@ static int __init pSeries_init_panel(void)
}
arch_initcall(pSeries_init_panel);
-#ifdef CONFIG_HOTPLUG_CPU
-static void pSeries_mach_cpu_die(void)
-{
- local_irq_disable();
- idle_task_exit();
- xics_teardown_cpu(0);
- rtas_stop_self();
- /* Should never get here... */
- BUG();
- for(;;);
-}
-#else
-#define pSeries_mach_cpu_die NULL
-#endif
-
static int pseries_set_dabr(unsigned long dabr)
{
return plpar_hcall_norets(H_SET_DABR, dabr);
@@ -437,19 +422,14 @@ static int __init pSeries_probe_hypertas(unsigned long node,
if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
powerpc_firmware_features |= FW_FEATURE_LPAR;
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hpte_init_lpar();
- else
- hpte_init_native();
-
return 1;
}
static int __init pSeries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "device_type", NULL);
+ char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+
if (dtype == NULL)
return 0;
if (strcmp(dtype, "chrp"))
@@ -467,6 +447,11 @@ static int __init pSeries_probe(void)
/* Now try to figure out if we are running on LPAR */
of_scan_flat_dt(pSeries_probe_hypertas, NULL);
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ hpte_init_lpar();
+ else
+ hpte_init_native();
+
DBG("Machine is%s LPAR !\n",
(powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
@@ -561,7 +546,6 @@ define_machine(pseries) {
.power_off = rtas_power_off,
.halt = rtas_halt,
.panic = rtas_os_term,
- .cpu_die = pSeries_mach_cpu_die,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
.set_rtc_time = rtas_set_rtc_time,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c6624b8a0e7..4408518eaeb 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -64,197 +64,6 @@ static cpumask_t of_spin_map;
extern void generic_secondary_smp_init(unsigned long);
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* Get state of physical CPU.
- * Return codes:
- * 0 - The processor is in the RTAS stopped state
- * 1 - stop-self is in progress
- * 2 - The processor is not in the RTAS stopped state
- * -1 - Hardware Error
- * -2 - Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
- int cpu_status;
- int status, qcss_tok;
-
- qcss_tok = rtas_token("query-cpu-stopped-state");
- if (qcss_tok == RTAS_UNKNOWN_SERVICE)
- return -1;
- status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
- if (status != 0) {
- printk(KERN_ERR
- "RTAS query-cpu-stopped-state failed: %i\n", status);
- return status;
- }
-
- return cpu_status;
-}
-
-static int pSeries_cpu_disable(void)
-{
- int cpu = smp_processor_id();
-
- cpu_clear(cpu, cpu_online_map);
- vdso_data->processorCount--;
-
- /*fix boot_cpuid here*/
- if (cpu == boot_cpuid)
- boot_cpuid = any_online_cpu(cpu_online_map);
-
- /* FIXME: abstract this to not be platform specific later on */
- xics_migrate_irqs_away();
- return 0;
-}
-
-static void pSeries_cpu_die(unsigned int cpu)
-{
- int tries;
- int cpu_status;
- unsigned int pcpu = get_hard_smp_processor_id(cpu);
-
- for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
- break;
- msleep(200);
- }
- if (cpu_status != 0) {
- printk("Querying DEAD? cpu %i (%i) shows %i\n",
- cpu, pcpu, cpu_status);
- }
-
- /* Isolation and deallocation are definatly done by
- * drslot_chrp_cpu. If they were not they would be
- * done here. Change isolate state to Isolate and
- * change allocation-state to Unusable.
- */
- paca[cpu].cpu_start = 0;
-}
-
-/*
- * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
- * here is that a cpu device node may represent up to two logical cpus
- * in the SMT case. We must honor the assumption in other code that
- * the logical ids for sibling SMT threads x and y are adjacent, such
- * that x^1 == y and y^1 == x.
- */
-static int pSeries_add_processor(struct device_node *np)
-{
- unsigned int cpu;
- cpumask_t candidate_map, tmp = CPU_MASK_NONE;
- int err = -ENOSPC, len, nthreads, i;
- const u32 *intserv;
-
- intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
- if (!intserv)
- return 0;
-
- nthreads = len / sizeof(u32);
- for (i = 0; i < nthreads; i++)
- cpu_set(i, tmp);
-
- lock_cpu_hotplug();
-
- BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
-
- /* Get a bitmap of unoccupied slots. */
- cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
- if (cpus_empty(candidate_map)) {
- /* If we get here, it most likely means that NR_CPUS is
- * less than the partition's max processors setting.
- */
- printk(KERN_ERR "Cannot add cpu %s; this system configuration"
- " supports %d logical cpus.\n", np->full_name,
- cpus_weight(cpu_possible_map));
- goto out_unlock;
- }
-
- while (!cpus_empty(tmp))
- if (cpus_subset(tmp, candidate_map))
- /* Found a range where we can insert the new cpu(s) */
- break;
- else
- cpus_shift_left(tmp, tmp, nthreads);
-
- if (cpus_empty(tmp)) {
- printk(KERN_ERR "Unable to find space in cpu_present_map for"
- " processor %s with %d thread(s)\n", np->name,
- nthreads);
- goto out_unlock;
- }
-
- for_each_cpu_mask(cpu, tmp) {
- BUG_ON(cpu_isset(cpu, cpu_present_map));
- cpu_set(cpu, cpu_present_map);
- set_hard_smp_processor_id(cpu, *intserv++);
- }
- err = 0;
-out_unlock:
- unlock_cpu_hotplug();
- return err;
-}
-
-/*
- * Update the present map for a cpu node which is going away, and set
- * the hard id in the paca(s) to -1 to be consistent with boot time
- * convention for non-present cpus.
- */
-static void pSeries_remove_processor(struct device_node *np)
-{
- unsigned int cpu;
- int len, nthreads, i;
- const u32 *intserv;
-
- intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
- if (!intserv)
- return;
-
- nthreads = len / sizeof(u32);
-
- lock_cpu_hotplug();
- for (i = 0; i < nthreads; i++) {
- for_each_present_cpu(cpu) {
- if (get_hard_smp_processor_id(cpu) != intserv[i])
- continue;
- BUG_ON(cpu_online(cpu));
- cpu_clear(cpu, cpu_present_map);
- set_hard_smp_processor_id(cpu, -1);
- break;
- }
- if (cpu == NR_CPUS)
- printk(KERN_WARNING "Could not find cpu to remove "
- "with physical id 0x%x\n", intserv[i]);
- }
- unlock_cpu_hotplug();
-}
-
-static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
-{
- int err = NOTIFY_OK;
-
- switch (action) {
- case PSERIES_RECONFIG_ADD:
- if (pSeries_add_processor(node))
- err = NOTIFY_BAD;
- break;
- case PSERIES_RECONFIG_REMOVE:
- pSeries_remove_processor(node);
- break;
- default:
- err = NOTIFY_DONE;
- break;
- }
- return err;
-}
-
-static struct notifier_block pSeries_smp_nb = {
- .notifier_call = pSeries_smp_notifier,
-};
-
-#endif /* CONFIG_HOTPLUG_CPU */
-
/**
* smp_startup_cpu() - start the given cpu
*
@@ -422,15 +231,6 @@ static void __init smp_init_pseries(void)
DBG(" -> smp_init_pSeries()\n");
-#ifdef CONFIG_HOTPLUG_CPU
- smp_ops->cpu_disable = pSeries_cpu_disable;
- smp_ops->cpu_die = pSeries_cpu_die;
-
- /* Processors can be added/removed only on LPAR */
- if (firmware_has_feature(FW_FEATURE_LPAR))
- pSeries_reconfig_notifier_register(&pSeries_smp_nb);
-#endif
-
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 6cc34597a62..04d4917eb30 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -5,7 +5,8 @@ endif
obj-$(CONFIG_MPIC) += mpic.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_MPC106) += grackle.o
-obj-$(CONFIG_PPC_DCR) += dcr.o dcr-low.o
+obj-$(CONFIG_PPC_DCR) += dcr.o
+obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o
obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o
diff --git a/arch/powerpc/sysdev/dcr.S b/arch/powerpc/sysdev/dcr.S
deleted file mode 100644
index 2078f39e2f1..00000000000
--- a/arch/powerpc/sysdev/dcr.S
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * "Indirect" DCR access
- *
- * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/processor.h>
-
-#define DCR_ACCESS_PROLOG(table) \
- rlwinm r3,r3,4,18,27; \
- lis r5,table@h; \
- ori r5,r5,table@l; \
- add r3,r3,r5; \
- mtctr r3; \
- bctr
-
-_GLOBAL(__mfdcr)
- DCR_ACCESS_PROLOG(__mfdcr_table)
-
-_GLOBAL(__mtdcr)
- DCR_ACCESS_PROLOG(__mtdcr_table)
-
-__mfdcr_table:
- mfdcr r3,0; blr
-__mtdcr_table:
- mtdcr 0,r4; blr
-
-dcr = 1
- .rept 1023
- mfdcr r3,dcr; blr
- mtdcr dcr,r4; blr
- dcr = dcr + 1
- .endr
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 6995f51b948..74e48d94f27 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -223,23 +223,15 @@ static void qe_ic_mask_irq(unsigned int virq)
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
temp & ~qe_ic_info[src].mask);
- spin_unlock_irqrestore(&qe_ic_lock, flags);
-}
-
-static void qe_ic_mask_irq_and_ack(unsigned int virq)
-{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- unsigned long flags;
- u32 temp;
-
- spin_lock_irqsave(&qe_ic_lock, flags);
-
- temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
- qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
- temp & ~qe_ic_info[src].mask);
-
- /* There is nothing to do for ack here, ack is handled in ISR */
+ /* Flush the above write before enabling interrupts; otherwise,
+ * spurious interrupts will sometimes happen. To be 100% sure
+ * that the write has reached the device before interrupts are
+ * enabled, the mask register would have to be read back; however,
+ * this is not required for correctness, only to avoid wasting
+ * time on a large number of spurious interrupts. In testing,
+ * a sync reduced the observed spurious interrupts to zero.
+ */
+ mb();
spin_unlock_irqrestore(&qe_ic_lock, flags);
}
@@ -248,7 +240,7 @@ static struct irq_chip qe_ic_irq_chip = {
.typename = " QEIC ",
.unmask = qe_ic_unmask_irq,
.mask = qe_ic_mask_irq,
- .mask_ack = qe_ic_mask_irq_and_ack,
+ .mask_ack = qe_ic_mask_irq,
};
static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
@@ -331,34 +323,22 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
return irq_linear_revmap(qe_ic->irqhost, irq);
}
-/* FIXME: We mask all the QE Low interrupts while handling. We should
- * let other interrupt come in, but BAD interrupts are generated */
void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
{
struct qe_ic *qe_ic = desc->handler_data;
- struct irq_chip *chip = irq_desc[irq].chip;
-
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
- chip->mask_ack(irq);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- chip->unmask(irq);
}
-/* FIXME: We mask all the QE High interrupts while handling. We should
- * let other interrupt come in, but BAD interrupts are generated */
void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
{
struct qe_ic *qe_ic = desc->handler_data;
- struct irq_chip *chip = irq_desc[irq].chip;
-
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
- chip->mask_ack(irq);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
- chip->unmask(irq);
}
void __init qe_ic_init(struct device_node *node, unsigned int flags)
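
The new comment in qe_ic_mask_irq() describes a common MMIO pattern: after writing the mask register, issue a barrier (or read the register back) so the posted write has reached the device before interrupts can fire again. A generic sketch of that pattern with a dummy device rather than the QEIC registers; the barrier stands in for the kernel's mb(), which is a sync instruction on powerpc:

#include <stdint.h>

/* Dummy register block; a real driver would ioremap() the device. */
struct fake_pic {
	volatile uint32_t mask;
};

/* Stand-in for the kernel's mb(). */
#define mmio_barrier()	__sync_synchronize()

static void mask_source(struct fake_pic *pic, uint32_t bit)
{
	pic->mask &= ~bit;	/* clear the enable bit for this source */
	mmio_barrier();		/* ensure the write is no longer posted; */
				/* reading pic->mask back would also do  */
}

int main(void)
{
	static struct fake_pic pic = { .mask = 0xffffffffu };

	mask_source(&pic, 1u << 5);
	return (pic.mask >> 5) & 1;	/* 0: bit really cleared */
}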
diff --git a/arch/powerpc/sysdev/rom.c b/arch/powerpc/sysdev/rom.c
index bf5b3f10e6c..c855a3b298a 100644
--- a/arch/powerpc/sysdev/rom.c
+++ b/arch/powerpc/sysdev/rom.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <asm/of_device.h>
+#include <asm/of_platform.h>
static int __init powerpc_flash_init(void)
{
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a34ed49e035..77540a2f770 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -22,6 +22,7 @@
#include <linux/sysrq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/bug.h>
#include <asm/ptrace.h>
#include <asm/string.h>
@@ -35,7 +36,6 @@
#include <asm/cputable.h>
#include <asm/rtas.h>
#include <asm/sstep.h>
-#include <asm/bug.h>
#include <asm/irq_regs.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
@@ -1346,7 +1346,7 @@ static void backtrace(struct pt_regs *excp)
static void print_bug_trap(struct pt_regs *regs)
{
- struct bug_entry *bug;
+ const struct bug_entry *bug;
unsigned long addr;
if (regs->msr & MSR_PR)
@@ -1357,11 +1357,11 @@ static void print_bug_trap(struct pt_regs *regs)
bug = find_bug(regs->nip);
if (bug == NULL)
return;
- if (bug->line & BUG_WARNING_TRAP)
+ if (is_warning_bug(bug))
return;
- printf("kernel BUG in %s at %s:%d!\n",
- bug->function, bug->file, (unsigned int)bug->line);
+ printf("kernel BUG at %s:%u!\n",
+ bug->file, bug->line);
}
void excprint(struct pt_regs *fp)