Diffstat (limited to 'arch/powerpc/platforms')
46 files changed, 2324 insertions, 446 deletions
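The patch below maps Cell IIC pending bits onto per-node Linux IRQ numbers using fixed offsets and strides (see the layout comment in cell/interrupt.h and iic_external_get_irq() in cell/interrupt.c). As a rough standalone illustration of that arithmetic, and not part of the patch itself, the following sketch recomputes an SPE interrupt number and an IPI number from the same constants; the constants are copied from interrupt.h, while the program and the helper names spe_irq()/ipi_irq() are hypothetical.

/*
 * Standalone sketch: recompute Cell IIC IRQ numbers from the
 * node/class/unit layout used by the patch. Constants mirror
 * interrupt.h; the helpers are illustrative only.
 */
#include <stdio.h>

enum {
	IIC_EXT_OFFSET   = 0x00,	/* start of south bridge IRQs */
	IIC_SPE_OFFSET   = 0x40,	/* start of SPE interrupts */
	IIC_CLASS_STRIDE = 0x10,	/* SPE IRQs per class */
	IIC_IPI_OFFSET   = 0x70,	/* start of IPI IRQs */
	IIC_NUM_IPIS     = 0x10,	/* IRQs reserved for IPIs */
	IIC_NODE_STRIDE  = 0x80,	/* total IRQs per node */
};

/* SPE interrupt: one line per (node, class, unit), as in iic_external_get_irq() */
static int spe_irq(int node, int cls, int unit)
{
	return IIC_SPE_OFFSET + cls * IIC_CLASS_STRIDE +
	       node * IIC_NODE_STRIDE + unit;
}

/* IPI interrupt: the highest priorities map to the last IPI numbers */
static int ipi_irq(int ipi)
{
	return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi;
}

int main(void)
{
	/* e.g. node 1, class 2, unit 0x03 -> 0x40 + 0x20 + 0x80 + 0x03 = 0xe3 */
	printf("SPE irq: 0x%02x\n", spe_irq(1, 2, 0x03));
	/* ipi 0 lands on the top of the IPI range, 0x7f */
	printf("IPI irq: 0x%02x\n", ipi_irq(0));
	return 0;
}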
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 172c0db6350..8836b3a0066 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_85xx) += 85xx/ obj-$(CONFIG_PPC_PSERIES) += pseries/ obj-$(CONFIG_PPC_ISERIES) += iseries/ obj-$(CONFIG_PPC_MAPLE) += maple/ +obj-$(CONFIG_PPC_CELL) += cell/ diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile new file mode 100644 index 00000000000..55e094b96bc --- /dev/null +++ b/arch/powerpc/platforms/cell/Makefile @@ -0,0 +1,2 @@ +obj-y += interrupt.o iommu.o setup.o spider-pic.o +obj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c new file mode 100644 index 00000000000..7fbe78a9327 --- /dev/null +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -0,0 +1,284 @@ +/* + * Cell Internal Interrupt Controller + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann <arndb@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/config.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/percpu.h> +#include <linux/types.h> + +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/prom.h> +#include <asm/ptrace.h> + +#include "interrupt.h" + +struct iic_pending_bits { + u32 data; + u8 flags; + u8 class; + u8 source; + u8 prio; +}; + +enum iic_pending_flags { + IIC_VALID = 0x80, + IIC_IPI = 0x40, +}; + +struct iic_regs { + struct iic_pending_bits pending; + struct iic_pending_bits pending_destr; + u64 generate; + u64 prio; +}; + +struct iic { + struct iic_regs __iomem *regs; +}; + +static DEFINE_PER_CPU(struct iic, iic); + +void iic_local_enable(void) +{ + out_be64(&__get_cpu_var(iic).regs->prio, 0xff); +} + +void iic_local_disable(void) +{ + out_be64(&__get_cpu_var(iic).regs->prio, 0x0); +} + +static unsigned int iic_startup(unsigned int irq) +{ + return 0; +} + +static void iic_enable(unsigned int irq) +{ + iic_local_enable(); +} + +static void iic_disable(unsigned int irq) +{ +} + +static void iic_end(unsigned int irq) +{ + iic_local_enable(); +} + +static struct hw_interrupt_type iic_pic = { + .typename = " CELL-IIC ", + .startup = iic_startup, + .enable = iic_enable, + .disable = iic_disable, + .end = iic_end, +}; + +static int iic_external_get_irq(struct iic_pending_bits pending) +{ + int irq; + unsigned char node, unit; + + node = pending.source >> 4; + unit = pending.source & 0xf; + irq = -1; + + /* + * This mapping is specific to the Cell Broadband + * Engine. We might need to get the numbers + * from the device tree to support future CPUs. + */ + switch (unit) { + case 0x00: + case 0x0b: + /* + * One of these units can be connected + * to an external interrupt controller. 
+ */ + if (pending.prio > 0x3f || + pending.class != 2) + break; + irq = IIC_EXT_OFFSET + + spider_get_irq(pending.prio + node * IIC_NODE_STRIDE) + + node * IIC_NODE_STRIDE; + break; + case 0x01 ... 0x04: + case 0x07 ... 0x0a: + /* + * These units are connected to the SPEs + */ + if (pending.class > 2) + break; + irq = IIC_SPE_OFFSET + + pending.class * IIC_CLASS_STRIDE + + node * IIC_NODE_STRIDE + + unit; + break; + } + if (irq == -1) + printk(KERN_WARNING "Unexpected interrupt class %02x, " + "source %02x, prio %02x, cpu %02x\n", pending.class, + pending.source, pending.prio, smp_processor_id()); + return irq; +} + +/* Get an IRQ number from the pending state register of the IIC */ +int iic_get_irq(struct pt_regs *regs) +{ + struct iic *iic; + int irq; + struct iic_pending_bits pending; + + iic = &__get_cpu_var(iic); + *(unsigned long *) &pending = + in_be64((unsigned long __iomem *) &iic->regs->pending_destr); + + irq = -1; + if (pending.flags & IIC_VALID) { + if (pending.flags & IIC_IPI) { + irq = IIC_IPI_OFFSET + (pending.prio >> 4); +/* + if (irq > 0x80) + printk(KERN_WARNING "Unexpected IPI prio %02x" + "on CPU %02x\n", pending.prio, + smp_processor_id()); +*/ + } else { + irq = iic_external_get_irq(pending); + } + } + return irq; +} + +static struct iic_regs __iomem *find_iic(int cpu) +{ + struct device_node *np; + int nodeid = cpu / 2; + unsigned long regs; + struct iic_regs __iomem *iic_regs; + + for (np = of_find_node_by_type(NULL, "cpu"); + np; + np = of_find_node_by_type(np, "cpu")) { + if (nodeid == *(int *)get_property(np, "node-id", NULL)) + break; + } + + if (!np) { + printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); + iic_regs = NULL; + } else { + regs = *(long *)get_property(np, "iic", NULL); + + /* hack until we have decided on the devtree info */ + regs += 0x400; + if (cpu & 1) + regs += 0x20; + + printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs); + iic_regs = __ioremap(regs, sizeof(struct iic_regs), + _PAGE_NO_CACHE); + } + return iic_regs; +} + +#ifdef CONFIG_SMP + +/* Use the highest interrupt priorities for IPI */ +static inline int iic_ipi_to_irq(int ipi) +{ + return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi; +} + +static inline int iic_irq_to_ipi(int irq) +{ + return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET); +} + +void iic_setup_cpu(void) +{ + out_be64(&__get_cpu_var(iic).regs->prio, 0xff); +} + +void iic_cause_IPI(int cpu, int mesg) +{ + out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4); +} + +static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) +{ + smp_message_recv(iic_irq_to_ipi(irq), regs); + return IRQ_HANDLED; +} + +static void iic_request_ipi(int ipi, const char *name) +{ + int irq; + + irq = iic_ipi_to_irq(ipi); + /* IPIs are marked SA_INTERRUPT as they must run with irqs + * disabled */ + get_irq_desc(irq)->handler = &iic_pic; + get_irq_desc(irq)->status |= IRQ_PER_CPU; + request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL); +} + +void iic_request_IPIs(void) +{ + iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); + iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); +#ifdef CONFIG_DEBUGGER + iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); +#endif /* CONFIG_DEBUGGER */ +} +#endif /* CONFIG_SMP */ + +static void iic_setup_spe_handlers(void) +{ + int be, isrc; + + /* Assume two threads per BE are present */ + for (be=0; be < num_present_cpus() / 2; be++) { + for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { + int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; + 
get_irq_desc(irq)->handler = &iic_pic; + } + } +} + +void iic_init_IRQ(void) +{ + int cpu, irq_offset; + struct iic *iic; + + irq_offset = 0; + for_each_cpu(cpu) { + iic = &per_cpu(iic, cpu); + iic->regs = find_iic(cpu); + if (iic->regs) + out_be64(&iic->regs->prio, 0xff); + } + iic_setup_spe_handlers(); +} diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h new file mode 100644 index 00000000000..37d58e6fd0c --- /dev/null +++ b/arch/powerpc/platforms/cell/interrupt.h @@ -0,0 +1,62 @@ +#ifndef ASM_CELL_PIC_H +#define ASM_CELL_PIC_H +#ifdef __KERNEL__ +/* + * Mapping of IIC pending bits into per-node + * interrupt numbers. + * + * IRQ FF CC SS PP FF CC SS PP Description + * + * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge + * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge + * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0 + * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1 + * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2 + * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI + * + * F flags + * C class + * S source + * P Priority + * + node number + * * don't care + * + * A node consists of a Cell Broadband Engine and an optional + * south bridge device providing a maximum of 64 IRQs. + * The south bridge may be connected to either IOIF0 + * or IOIF1. + * Each SPE is represented as three IRQ lines, one per + * interrupt class. + * 16 IRQ numbers are reserved for inter processor + * interruptions, although these are only used in the + * range of the first node. + * + * This scheme needs 128 IRQ numbers per BIF node ID, + * which means that with the total of 512 lines + * available, we can have a maximum of four nodes. + */ + +enum { + IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ + IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ + IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ + IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ + IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ + IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ + IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ +}; + +extern void iic_init_IRQ(void); +extern int iic_get_irq(struct pt_regs *regs); +extern void iic_cause_IPI(int cpu, int mesg); +extern void iic_request_IPIs(void); +extern void iic_setup_cpu(void); +extern void iic_local_enable(void); +extern void iic_local_disable(void); + + +extern void spider_init_IRQ(void); +extern int spider_get_irq(unsigned long int_pending); + +#endif +#endif /* ASM_CELL_PIC_H */ diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c new file mode 100644 index 00000000000..74f999b4ac9 --- /dev/null +++ b/arch/powerpc/platforms/cell/iommu.c @@ -0,0 +1,381 @@ +/* + * IOMMU implementation for Cell Broadband Processor Architecture + * We just establish a linear mapping at boot by setting all the + * IOPT cache entries in the CPU. + * The mapping functions should be identical to pci_direct_iommu, + * except for the handling of the high order bit that is required + * by the Spider bridge. These should be split into a separate + * file at the point where we get a different bridge chip. 
+ * + * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH, + * Arnd Bergmann <arndb@de.ibm.com> + * + * Based on linear mapping + * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#undef DEBUG + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> + +#include <asm/sections.h> +#include <asm/iommu.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/pci-bridge.h> +#include <asm/machdep.h> +#include <asm/pmac_feature.h> +#include <asm/abs_addr.h> +#include <asm/system.h> +#include <asm/ppc-pci.h> + +#include "iommu.h" + +static inline unsigned long +get_iopt_entry(unsigned long real_address, unsigned long ioid, + unsigned long prot) +{ + return (prot & IOPT_PROT_MASK) + | (IOPT_COHERENT) + | (IOPT_ORDER_VC) + | (real_address & IOPT_RPN_MASK) + | (ioid & IOPT_IOID_MASK); +} + +typedef struct { + unsigned long val; +} ioste; + +static inline ioste +mk_ioste(unsigned long val) +{ + ioste ioste = { .val = val, }; + return ioste; +} + +static inline ioste +get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size) +{ + unsigned long ps; + unsigned long iostep; + unsigned long nnpt; + unsigned long shift; + + switch (page_size) { + case 0x1000000: + ps = IOST_PS_16M; + nnpt = 0; /* one page per segment */ + shift = 5; /* segment has 16 iopt entries */ + break; + + case 0x100000: + ps = IOST_PS_1M; + nnpt = 0; /* one page per segment */ + shift = 1; /* segment has 256 iopt entries */ + break; + + case 0x10000: + ps = IOST_PS_64K; + nnpt = 0x07; /* 8 pages per io page table */ + shift = 0; /* all entries are used */ + break; + + case 0x1000: + ps = IOST_PS_4K; + nnpt = 0x7f; /* 128 pages per io page table */ + shift = 0; /* all entries are used */ + break; + + default: /* not a known compile time constant */ + { + /* BUILD_BUG_ON() is not usable here */ + extern void __get_iost_entry_bad_page_size(void); + __get_iost_entry_bad_page_size(); + } + break; + } + + iostep = iopt_base + + /* need 8 bytes per iopte */ + (((io_address / page_size * 8) + /* align io page tables on 4k page boundaries */ + << shift) + /* nnpt+1 pages go into each iopt */ + & ~(nnpt << 12)); + + nnpt++; /* this seems to work, but the documentation is not clear + about wether we put nnpt or nnpt-1 into the ioste bits. + In theory, this can't work for 4k pages. 
*/ + return mk_ioste(IOST_VALID_MASK + | (iostep & IOST_PT_BASE_MASK) + | ((nnpt << 5) & IOST_NNPT_MASK) + | (ps & IOST_PS_MASK)); +} + +/* compute the address of an io pte */ +static inline unsigned long +get_ioptep(ioste iost_entry, unsigned long io_address) +{ + unsigned long iopt_base; + unsigned long page_size; + unsigned long page_number; + unsigned long iopt_offset; + + iopt_base = iost_entry.val & IOST_PT_BASE_MASK; + page_size = iost_entry.val & IOST_PS_MASK; + + /* decode page size to compute page number */ + page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size); + /* page number is an offset into the io page table */ + iopt_offset = (page_number << 3) & 0x7fff8ul; + return iopt_base + iopt_offset; +} + +/* compute the tag field of the iopt cache entry */ +static inline unsigned long +get_ioc_tag(ioste iost_entry, unsigned long io_address) +{ + unsigned long iopte = get_ioptep(iost_entry, io_address); + + return IOPT_VALID_MASK + | ((iopte & 0x00000000000000ff8ul) >> 3) + | ((iopte & 0x0000003fffffc0000ul) >> 9); +} + +/* compute the hashed 6 bit index for the 4-way associative pte cache */ +static inline unsigned long +get_ioc_hash(ioste iost_entry, unsigned long io_address) +{ + unsigned long iopte = get_ioptep(iost_entry, io_address); + + return ((iopte & 0x000000000000001f8ul) >> 3) + ^ ((iopte & 0x00000000000020000ul) >> 17) + ^ ((iopte & 0x00000000000010000ul) >> 15) + ^ ((iopte & 0x00000000000008000ul) >> 13) + ^ ((iopte & 0x00000000000004000ul) >> 11) + ^ ((iopte & 0x00000000000002000ul) >> 9) + ^ ((iopte & 0x00000000000001000ul) >> 7); +} + +/* same as above, but pretend that we have a simpler 1-way associative + pte cache with an 8 bit index */ +static inline unsigned long +get_ioc_hash_1way(ioste iost_entry, unsigned long io_address) +{ + unsigned long iopte = get_ioptep(iost_entry, io_address); + + return ((iopte & 0x000000000000001f8ul) >> 3) + ^ ((iopte & 0x00000000000020000ul) >> 17) + ^ ((iopte & 0x00000000000010000ul) >> 15) + ^ ((iopte & 0x00000000000008000ul) >> 13) + ^ ((iopte & 0x00000000000004000ul) >> 11) + ^ ((iopte & 0x00000000000002000ul) >> 9) + ^ ((iopte & 0x00000000000001000ul) >> 7) + ^ ((iopte & 0x0000000000000c000ul) >> 8); +} + +static inline ioste +get_iost_cache(void __iomem *base, unsigned long index) +{ + unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); + return mk_ioste(in_be64(&p[index])); +} + +static inline void +set_iost_cache(void __iomem *base, unsigned long index, ioste ste) +{ + unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); + pr_debug("ioste %02lx was %016lx, store %016lx", index, + get_iost_cache(base, index).val, ste.val); + out_be64(&p[index], ste.val); + pr_debug(" now %016lx\n", get_iost_cache(base, index).val); +} + +static inline unsigned long +get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag) +{ + unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR); + unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG); + + *tag = tags[index]; + rmb(); + return *p; +} + +static inline void +set_iopt_cache(void __iomem *base, unsigned long index, + unsigned long tag, unsigned long val) +{ + unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR; + unsigned long __iomem *p = base + IOC_PT_CACHE_REG; + pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n", + index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag); + + out_be64(p, val); + out_be64(&tags[index], tag); +} + +static inline void +set_iost_origin(void __iomem *base) +{ + unsigned long __iomem *p = 
base + IOC_ST_ORIGIN; + unsigned long origin = IOSTO_ENABLE | IOSTO_SW; + + pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin); + out_be64(p, origin); +} + +static inline void +set_iocmd_config(void __iomem *base) +{ + unsigned long __iomem *p = base + 0xc00; + unsigned long conf; + + conf = in_be64(p); + pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE); + out_be64(p, conf | IOCMD_CONF_TE); +} + +/* FIXME: get these from the device tree */ +#define ioc_base 0x20000511000ull +#define ioc_mmio_base 0x20000510000ull +#define ioid 0x48a +#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */ +#define io_page_size 0x1000000 + +static unsigned long map_iopt_entry(unsigned long address) +{ + switch (address >> 20) { + case 0x600: + address = 0x24020000000ull; /* spider i/o */ + break; + default: + address += iopt_phys_offset; + break; + } + + return get_iopt_entry(address, ioid, IOPT_PROT_RW); +} + +static void iommu_bus_setup_null(struct pci_bus *b) { } +static void iommu_dev_setup_null(struct pci_dev *d) { } + +/* initialize the iommu to support a simple linear mapping + * for each DMA window used by any device. For now, we + * happen to know that there is only one DMA window in use, + * starting at iopt_phys_offset. */ +static void cell_map_iommu(void) +{ + unsigned long address; + void __iomem *base; + ioste ioste; + unsigned long index; + + base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE); + pr_debug("%lx mapped to %p\n", ioc_base, base); + set_iocmd_config(base); + iounmap(base); + + base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE); + pr_debug("%lx mapped to %p\n", ioc_mmio_base, base); + + set_iost_origin(base); + + for (address = 0; address < 0x100000000ul; address += io_page_size) { + ioste = get_iost_entry(0x10000000000ul, address, io_page_size); + if ((address & 0xfffffff) == 0) /* segment start */ + set_iost_cache(base, address >> 28, ioste); + index = get_ioc_hash_1way(ioste, address); + pr_debug("addr %08lx, index %02lx, ioste %016lx\n", + address, index, ioste.val); + set_iopt_cache(base, + get_ioc_hash_1way(ioste, address), + get_ioc_tag(ioste, address), + map_iopt_entry(address)); + } + iounmap(base); +} + + +static void *cell_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret; + + ret = (void *)__get_free_pages(flag, get_order(size)); + if (ret != NULL) { + memset(ret, 0, size); + *dma_handle = virt_to_abs(ret) | CELL_DMA_VALID; + } + return ret; +} + +static void cell_free_coherent(struct device *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} + +static dma_addr_t cell_map_single(struct device *hwdev, void *ptr, + size_t size, enum dma_data_direction direction) +{ + return virt_to_abs(ptr) | CELL_DMA_VALID; +} + +static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) +{ +} + +static int cell_map_sg(struct device *hwdev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) +{ + int i; + + for (i = 0; i < nents; i++, sg++) { + sg->dma_address = (page_to_phys(sg->page) + sg->offset) + | CELL_DMA_VALID; + sg->dma_length = sg->length; + } + + return nents; +} + +static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) +{ +} + +static int cell_dma_supported(struct device *dev, u64 mask) +{ + return mask < 0x100000000ull; +} + +void 
cell_init_iommu(void) +{ + cell_map_iommu(); + + /* Direct I/O, IOMMU off */ + ppc_md.iommu_dev_setup = iommu_dev_setup_null; + ppc_md.iommu_bus_setup = iommu_bus_setup_null; + + pci_dma_ops.alloc_coherent = cell_alloc_coherent; + pci_dma_ops.free_coherent = cell_free_coherent; + pci_dma_ops.map_single = cell_map_single; + pci_dma_ops.unmap_single = cell_unmap_single; + pci_dma_ops.map_sg = cell_map_sg; + pci_dma_ops.unmap_sg = cell_unmap_sg; + pci_dma_ops.dma_supported = cell_dma_supported; +} diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h new file mode 100644 index 00000000000..490d77abfe8 --- /dev/null +++ b/arch/powerpc/platforms/cell/iommu.h @@ -0,0 +1,65 @@ +#ifndef CELL_IOMMU_H +#define CELL_IOMMU_H + +/* some constants */ +enum { + /* segment table entries */ + IOST_VALID_MASK = 0x8000000000000000ul, + IOST_TAG_MASK = 0x3000000000000000ul, + IOST_PT_BASE_MASK = 0x000003fffffff000ul, + IOST_NNPT_MASK = 0x0000000000000fe0ul, + IOST_PS_MASK = 0x000000000000000ful, + + IOST_PS_4K = 0x1, + IOST_PS_64K = 0x3, + IOST_PS_1M = 0x5, + IOST_PS_16M = 0x7, + + /* iopt tag register */ + IOPT_VALID_MASK = 0x0000000200000000ul, + IOPT_TAG_MASK = 0x00000001fffffffful, + + /* iopt cache register */ + IOPT_PROT_MASK = 0xc000000000000000ul, + IOPT_PROT_NONE = 0x0000000000000000ul, + IOPT_PROT_READ = 0x4000000000000000ul, + IOPT_PROT_WRITE = 0x8000000000000000ul, + IOPT_PROT_RW = 0xc000000000000000ul, + IOPT_COHERENT = 0x2000000000000000ul, + + IOPT_ORDER_MASK = 0x1800000000000000ul, + /* order access to same IOID/VC on same address */ + IOPT_ORDER_ADDR = 0x0800000000000000ul, + /* similar, but only after a write access */ + IOPT_ORDER_WRITES = 0x1000000000000000ul, + /* Order all accesses to same IOID/VC */ + IOPT_ORDER_VC = 0x1800000000000000ul, + + IOPT_RPN_MASK = 0x000003fffffff000ul, + IOPT_HINT_MASK = 0x0000000000000800ul, + IOPT_IOID_MASK = 0x00000000000007fful, + + IOSTO_ENABLE = 0x8000000000000000ul, + IOSTO_ORIGIN = 0x000003fffffff000ul, + IOSTO_HW = 0x0000000000000800ul, + IOSTO_SW = 0x0000000000000400ul, + + IOCMD_CONF_TE = 0x0000800000000000ul, + + /* memory mapped registers */ + IOC_PT_CACHE_DIR = 0x000, + IOC_ST_CACHE_DIR = 0x800, + IOC_PT_CACHE_REG = 0x910, + IOC_ST_ORIGIN = 0x918, + IOC_CONF = 0x930, + + /* The high bit needs to be set on every DMA address, + only 2GB are addressable */ + CELL_DMA_VALID = 0x80000000, + CELL_DMA_MASK = 0x7fffffff, +}; + + +void cell_init_iommu(void); + +#endif diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c new file mode 100644 index 00000000000..9a495634d0c --- /dev/null +++ b/arch/powerpc/platforms/cell/setup.c @@ -0,0 +1,141 @@ +/* + * linux/arch/powerpc/platforms/cell/cell_setup.c + * + * Copyright (C) 1995 Linus Torvalds + * Adapted from 'alpha' version by Gary Thomas + * Modified by Cort Dougan (cort@cs.nmt.edu) + * Modified by PPC64 Team, IBM Corp + * Modified by Cell Team, IBM Deutschland Entwicklung GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#undef DEBUG + +#include <linux/config.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/stddef.h> +#include <linux/unistd.h> +#include <linux/slab.h> +#include <linux/user.h> +#include <linux/reboot.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/seq_file.h> +#include <linux/root_dev.h> +#include <linux/console.h> + +#include <asm/mmu.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/pgtable.h> +#include <asm/prom.h> +#include <asm/rtas.h> +#include <asm/pci-bridge.h> +#include <asm/iommu.h> +#include <asm/dma.h> +#include <asm/machdep.h> +#include <asm/time.h> +#include <asm/nvram.h> +#include <asm/cputable.h> +#include <asm/ppc-pci.h> +#include <asm/irq.h> + +#include "interrupt.h" +#include "iommu.h" + +#ifdef DEBUG +#define DBG(fmt...) udbg_printf(fmt) +#else +#define DBG(fmt...) +#endif + +void cell_show_cpuinfo(struct seq_file *m) +{ + struct device_node *root; + const char *model = ""; + + root = of_find_node_by_path("/"); + if (root) + model = get_property(root, "model", NULL); + seq_printf(m, "machine\t\t: CHRP %s\n", model); + of_node_put(root); +} + +static void cell_progress(char *s, unsigned short hex) +{ + printk("*** %04x : %s\n", hex, s ? s : ""); +} + +static void __init cell_setup_arch(void) +{ + ppc_md.init_IRQ = iic_init_IRQ; + ppc_md.get_irq = iic_get_irq; + +#ifdef CONFIG_SMP + smp_init_cell(); +#endif + + /* init to some ~sane value until calibrate_delay() runs */ + loops_per_jiffy = 50000000; + + if (ROOT_DEV == 0) { + printk("No ramdisk, default root is /dev/hda2\n"); + ROOT_DEV = Root_HDA2; + } + + /* Find and initialize PCI host bridges */ + init_pci_config_tokens(); + find_and_init_phbs(); + spider_init_IRQ(); +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + + mmio_nvram_init(); +} + +/* + * Early initialization. Relocation is on but do not reference unbolted pages + */ +static void __init cell_init_early(void) +{ + DBG(" -> cell_init_early()\n"); + + hpte_init_native(); + + cell_init_iommu(); + + ppc64_interrupt_controller = IC_CELL_PIC; + + DBG(" <- cell_init_early()\n"); +} + + +static int __init cell_probe(int platform) +{ + if (platform != PLATFORM_CELL) + return 0; + + return 1; +} + +struct machdep_calls __initdata cell_md = { + .probe = cell_probe, + .setup_arch = cell_setup_arch, + .init_early = cell_init_early, + .show_cpuinfo = cell_show_cpuinfo, + .restart = rtas_restart, + .power_off = rtas_power_off, + .halt = rtas_halt, + .get_boot_time = rtas_get_boot_time, + .get_rtc_time = rtas_get_rtc_time, + .set_rtc_time = rtas_set_rtc_time, + .calibrate_decr = generic_calibrate_decr, + .progress = cell_progress, +}; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c new file mode 100644 index 00000000000..de96eadf419 --- /dev/null +++ b/arch/powerpc/platforms/cell/smp.c @@ -0,0 +1,230 @@ +/* + * SMP support for BPA machines. + * + * Dave Engebretsen, Peter Bergner, and + * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com + * + * Plus various changes from other IBM teams... + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#undef DEBUG + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <linux/err.h> +#include <linux/sysdev.h> +#include <linux/cpu.h> + +#include <asm/ptrace.h> +#include <asm/atomic.h> +#include <asm/irq.h> +#include <asm/page.h> +#include <asm/pgtable.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/smp.h> +#include <asm/paca.h> +#include <asm/time.h> +#include <asm/machdep.h> +#include <asm/cputable.h> +#include <asm/firmware.h> +#include <asm/system.h> +#include <asm/rtas.h> + +#include "interrupt.h" + +#ifdef DEBUG +#define DBG(fmt...) udbg_printf(fmt) +#else +#define DBG(fmt...) +#endif + +/* + * The primary thread of each non-boot processor is recorded here before + * smp init. + */ +static cpumask_t of_spin_map; + +extern void pSeries_secondary_smp_init(unsigned long); + +/** + * smp_startup_cpu() - start the given cpu + * + * At boot time, there is nothing to do for primary threads which were + * started from Open Firmware. For anything else, call RTAS with the + * appropriate start location. + * + * Returns: + * 0 - failure + * 1 - success + */ +static inline int __devinit smp_startup_cpu(unsigned int lcpu) +{ + int status; + unsigned long start_here = __pa((u32)*((unsigned long *) + pSeries_secondary_smp_init)); + unsigned int pcpu; + int start_cpu; + + if (cpu_isset(lcpu, of_spin_map)) + /* Already started by OF and sitting in spin loop */ + return 1; + + pcpu = get_hard_smp_processor_id(lcpu); + + /* Fixup atomic count: it exited inside IRQ handler. */ + paca[lcpu].__current->thread_info->preempt_count = 0; + + /* + * If the RTAS start-cpu token does not exist then presume the + * cpu is already spinning. 
+ */ + start_cpu = rtas_token("start-cpu"); + if (start_cpu == RTAS_UNKNOWN_SERVICE) + return 1; + + status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); + if (status != 0) { + printk(KERN_ERR "start-cpu failed: %i\n", status); + return 0; + } + + return 1; +} + +static void smp_iic_message_pass(int target, int msg) +{ + unsigned int i; + + if (target < NR_CPUS) { + iic_cause_IPI(target, msg); + } else { + for_each_online_cpu(i) { + if (target == MSG_ALL_BUT_SELF + && i == smp_processor_id()) + continue; + iic_cause_IPI(i, msg); + } + } +} + +static int __init smp_iic_probe(void) +{ + iic_request_IPIs(); + + return cpus_weight(cpu_possible_map); +} + +static void __devinit smp_iic_setup_cpu(int cpu) +{ + if (cpu != boot_cpuid) + iic_setup_cpu(); +} + +static DEFINE_SPINLOCK(timebase_lock); +static unsigned long timebase = 0; + +static void __devinit cell_give_timebase(void) +{ + spin_lock(&timebase_lock); + rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); + timebase = get_tb(); + spin_unlock(&timebase_lock); + + while (timebase) + barrier(); + rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL); +} + +static void __devinit cell_take_timebase(void) +{ + while (!timebase) + barrier(); + spin_lock(&timebase_lock); + set_tb(timebase >> 32, timebase & 0xffffffff); + timebase = 0; + spin_unlock(&timebase_lock); +} + +static void __devinit smp_cell_kick_cpu(int nr) +{ + BUG_ON(nr < 0 || nr >= NR_CPUS); + + if (!smp_startup_cpu(nr)) + return; + + /* + * The processor is currently spinning, waiting for the + * cpu_start field to become non-zero After we set cpu_start, + * the processor will continue on to secondary_start + */ + paca[nr].cpu_start = 1; +} + +static int smp_cell_cpu_bootable(unsigned int nr) +{ + /* Special case - we inhibit secondary thread startup + * during boot if the user requests it. Odd-numbered + * cpus are assumed to be secondary threads. + */ + if (system_state < SYSTEM_RUNNING && + cpu_has_feature(CPU_FTR_SMT) && + !smt_enabled_at_boot && nr % 2 != 0) + return 0; + + return 1; +} +static struct smp_ops_t bpa_iic_smp_ops = { + .message_pass = smp_iic_message_pass, + .probe = smp_iic_probe, + .kick_cpu = smp_cell_kick_cpu, + .setup_cpu = smp_iic_setup_cpu, + .cpu_bootable = smp_cell_cpu_bootable, +}; + +/* This is called very early */ +void __init smp_init_cell(void) +{ + int i; + + DBG(" -> smp_init_cell()\n"); + + smp_ops = &bpa_iic_smp_ops; + + /* Mark threads which are still spinning in hold loops. */ + if (cpu_has_feature(CPU_FTR_SMT)) { + for_each_present_cpu(i) { + if (i % 2 == 0) + /* + * Even-numbered logical cpus correspond to + * primary threads. 
+ */ + cpu_set(i, of_spin_map); + } + } else { + of_spin_map = cpu_present_map; + } + + cpu_clear(boot_cpuid, of_spin_map); + + /* Non-lpar has additional take/give timebase */ + if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { + smp_ops->give_timebase = cell_give_timebase; + smp_ops->take_timebase = cell_take_timebase; + } + + DBG(" <- smp_init_cell()\n"); +} diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c new file mode 100644 index 00000000000..e74132188bd --- /dev/null +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -0,0 +1,191 @@ +/* + * External Interrupt Controller on Spider South Bridge + * + * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 + * + * Author: Arnd Bergmann <arndb@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/interrupt.h> +#include <linux/irq.h> + +#include <asm/pgtable.h> +#include <asm/prom.h> +#include <asm/io.h> + +#include "interrupt.h" + +/* register layout taken from Spider spec, table 7.4-4 */ +enum { + TIR_DEN = 0x004, /* Detection Enable Register */ + TIR_MSK = 0x084, /* Mask Level Register */ + TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ + TIR_PNDA = 0x100, /* Pending Register A */ + TIR_PNDB = 0x104, /* Pending Register B */ + TIR_CS = 0x144, /* Current Status Register */ + TIR_LCSA = 0x150, /* Level Current Status Register A */ + TIR_LCSB = 0x154, /* Level Current Status Register B */ + TIR_LCSC = 0x158, /* Level Current Status Register C */ + TIR_LCSD = 0x15c, /* Level Current Status Register D */ + TIR_CFGA = 0x200, /* Setting Register A0 */ + TIR_CFGB = 0x204, /* Setting Register B0 */ + /* 0x208 ... 
0x3ff Setting Register An/Bn */ + TIR_PPNDA = 0x400, /* Packet Pending Register A */ + TIR_PPNDB = 0x404, /* Packet Pending Register B */ + TIR_PIERA = 0x408, /* Packet Output Error Register A */ + TIR_PIERB = 0x40c, /* Packet Output Error Register B */ + TIR_PIEN = 0x444, /* Packet Output Enable Register */ + TIR_PIPND = 0x454, /* Packet Output Pending Register */ + TIRDID = 0x484, /* Spider Device ID Register */ + REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ + REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ + REISWAITEN = 0x508, /* Reissue Wait Control*/ +}; + +static void __iomem *spider_pics[4]; + +static void __iomem *spider_get_pic(int irq) +{ + int node = irq / IIC_NODE_STRIDE; + irq %= IIC_NODE_STRIDE; + + if (irq >= IIC_EXT_OFFSET && + irq < IIC_EXT_OFFSET + IIC_NUM_EXT && + spider_pics) + return spider_pics[node]; + return NULL; +} + +static int spider_get_nr(unsigned int irq) +{ + return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; +} + +static void __iomem *spider_get_irq_config(int irq) +{ + void __iomem *pic; + pic = spider_get_pic(irq); + return pic + TIR_CFGA + 8 * spider_get_nr(irq); +} + +static void spider_enable_irq(unsigned int irq) +{ + void __iomem *cfg = spider_get_irq_config(irq); + irq = spider_get_nr(irq); + + out_be32(cfg, in_be32(cfg) | 0x3107000eu); + out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); +} + +static void spider_disable_irq(unsigned int irq) +{ + void __iomem *cfg = spider_get_irq_config(irq); + irq = spider_get_nr(irq); + + out_be32(cfg, in_be32(cfg) & ~0x30000000u); +} + +static unsigned int spider_startup_irq(unsigned int irq) +{ + spider_enable_irq(irq); + return 0; +} + +static void spider_shutdown_irq(unsigned int irq) +{ + spider_disable_irq(irq); +} + +static void spider_end_irq(unsigned int irq) +{ + spider_enable_irq(irq); +} + +static void spider_ack_irq(unsigned int irq) +{ + spider_disable_irq(irq); + iic_local_enable(); +} + +static struct hw_interrupt_type spider_pic = { + .typename = " SPIDER ", + .startup = spider_startup_irq, + .shutdown = spider_shutdown_irq, + .enable = spider_enable_irq, + .disable = spider_disable_irq, + .ack = spider_ack_irq, + .end = spider_end_irq, +}; + + +int spider_get_irq(unsigned long int_pending) +{ + void __iomem *regs = spider_get_pic(int_pending); + unsigned long cs; + int irq; + + cs = in_be32(regs + TIR_CS); + + irq = cs >> 24; + if (irq != 63) + return irq; + + return -1; +} + +void spider_init_IRQ(void) +{ + int node; + struct device_node *dn; + unsigned int *property; + long spiderpic; + int n; + +/* FIXME: detect multiple PICs as soon as the device tree has them */ + for (node = 0; node < 1; node++) { + dn = of_find_node_by_path("/"); + n = prom_n_addr_cells(dn); + property = (unsigned int *) get_property(dn, + "platform-spider-pic", NULL); + + if (!property) + continue; + for (spiderpic = 0; n > 0; --n) + spiderpic = (spiderpic << 32) + *property++; + printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic); + spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE); + for (n = 0; n < IIC_NUM_EXT; n++) { + int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; + get_irq_desc(irq)->handler = &spider_pic; + + /* do not mask any interrupts because of level */ + out_be32(spider_pics[node] + TIR_MSK, 0x0); + + /* disable edge detection clear */ + /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ + + /* enable interrupt packets to be output */ + out_be32(spider_pics[node] + TIR_PIEN, + in_be32(spider_pics[node] + TIR_PIEN) | 0x1); + + /* Enable the interrupt detection 
enable bit. Do this last! */ + out_be32(spider_pics[node] + TIR_DEN, + in_be32(spider_pics[node] +TIR_DEN) | 0x1); + + } + } +} diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index 4ac7125aa09..150f67d6f90 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -17,6 +17,7 @@ #include <asm/uaccess.h> #include <asm/prom.h> #include <asm/machdep.h> +#include <asm/rtas.h> #include "chrp.h" static unsigned int nvram_size; @@ -25,7 +26,8 @@ static DEFINE_SPINLOCK(nvram_lock); static unsigned char chrp_nvram_read(int addr) { - unsigned long done, flags; + unsigned int done; + unsigned long flags; unsigned char ret; if (addr >= nvram_size) { @@ -34,7 +36,8 @@ static unsigned char chrp_nvram_read(int addr) return 0xff; } spin_lock_irqsave(&nvram_lock, flags); - if ((call_rtas("nvram-fetch", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) + if ((rtas_call(rtas_token("nvram-fetch"), 3, 2, &done, addr, + __pa(nvram_buf), 1) != 0) || 1 != done) ret = 0xff; else ret = nvram_buf[0]; @@ -45,7 +48,8 @@ static unsigned char chrp_nvram_read(int addr) static void chrp_nvram_write(int addr, unsigned char val) { - unsigned long done, flags; + unsigned int done; + unsigned long flags; if (addr >= nvram_size) { printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n", @@ -54,7 +58,8 @@ static void chrp_nvram_write(int addr, unsigned char val) } spin_lock_irqsave(&nvram_lock, flags); nvram_buf[0] = val; - if ((call_rtas("nvram-store", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done) + if ((rtas_call(rtas_token("nvram-store"), 3, 2, &done, addr, + __pa(nvram_buf), 1) != 0) || 1 != done) printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr); spin_unlock_irqrestore(&nvram_lock, flags); } diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c index a9052305c35..29c86781c49 100644 --- a/arch/powerpc/platforms/chrp/pegasos_eth.c +++ b/arch/powerpc/platforms/chrp/pegasos_eth.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> +#include <linux/platform_device.h> #include <linux/mv643xx.h> #include <linux/pci.h> diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index 31ee49c2501..bb2315997d4 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c @@ -35,43 +35,6 @@ #include <asm/smp.h> #include <asm/mpic.h> -extern unsigned long smp_chrp_cpu_nr; - -static int __init smp_chrp_probe(void) -{ - struct device_node *cpus = NULL; - unsigned int *reg; - int reglen; - int ncpus = 0; - int cpuid; - unsigned int phys; - - /* Count CPUs in the device-tree */ - cpuid = 1; /* the boot cpu is logical cpu 0 */ - while ((cpus = of_find_node_by_type(cpus, "cpu")) != NULL) { - phys = ncpus; - reg = (unsigned int *) get_property(cpus, "reg", ®len); - if (reg && reglen >= sizeof(unsigned int)) - /* hmmm, not having a reg property would be bad */ - phys = *reg; - if (phys != boot_cpuid_phys) { - set_hard_smp_processor_id(cpuid, phys); - ++cpuid; - } - ++ncpus; - } - - printk(KERN_INFO "CHRP SMP probe found %d cpus\n", ncpus); - - /* Nothing more to do if less than 2 of them */ - if (ncpus <= 1) - return 1; - - mpic_request_ipis(); - - return ncpus; -} - static void __devinit smp_chrp_kick_cpu(int nr) { *(unsigned long *)KERNELBASE = nr; @@ -114,7 +77,7 @@ void __devinit smp_chrp_take_timebase(void) /* CHRP with openpic */ struct smp_ops_t chrp_smp_ops = { .message_pass = 
smp_mpic_message_pass, - .probe = smp_chrp_probe, + .probe = smp_mpic_probe, .kick_cpu = smp_chrp_kick_cpu, .setup_cpu = smp_chrp_setup_cpu, .give_timebase = smp_chrp_give_timebase, diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h index 321f3bb7a8f..a843b0f87b7 100644 --- a/arch/powerpc/platforms/iseries/call_hpt.h +++ b/arch/powerpc/platforms/iseries/call_hpt.h @@ -23,8 +23,8 @@ * drive the hypervisor from the OS. */ -#include <asm/iSeries/HvCallSc.h> -#include <asm/iSeries/HvTypes.h> +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> #include <asm/mmu.h> #define HvCallHptGetHptAddress HvCallHpt + 0 diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h index a86e065b957..59d4e0ad5cf 100644 --- a/arch/powerpc/platforms/iseries/call_pci.h +++ b/arch/powerpc/platforms/iseries/call_pci.h @@ -25,8 +25,8 @@ #ifndef _PLATFORMS_ISERIES_CALL_PCI_H #define _PLATFORMS_ISERIES_CALL_PCI_H -#include <asm/iSeries/HvCallSc.h> -#include <asm/iSeries/HvTypes.h> +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> /* * DSA == Direct Select Address diff --git a/arch/powerpc/platforms/iseries/call_sm.h b/arch/powerpc/platforms/iseries/call_sm.h index ef223166cf2..c7e251619f4 100644 --- a/arch/powerpc/platforms/iseries/call_sm.h +++ b/arch/powerpc/platforms/iseries/call_sm.h @@ -23,8 +23,8 @@ * drive the hypervisor from the OS. */ -#include <asm/iSeries/HvCallSc.h> -#include <asm/iSeries/HvTypes.h> +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> #define HvCallSmGet64BitsOfAccessMap HvCallSm + 11 diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c index b3c6c3374ca..30bdcf3925d 100644 --- a/arch/powerpc/platforms/iseries/htab.c +++ b/arch/powerpc/platforms/iseries/htab.c @@ -39,15 +39,16 @@ static inline void iSeries_hunlock(unsigned long slot) spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]); } -static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, - unsigned long prpn, unsigned long vflags, - unsigned long rflags) +long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, + unsigned long pa, unsigned long rflags, + unsigned long vflags, int psize) { - unsigned long arpn; long slot; hpte_t lhpte; int secondary = 0; + BUG_ON(psize != MMU_PAGE_4K); + /* * The hypervisor tries both primary and secondary. 
* If we are being called to insert in the secondary, @@ -59,8 +60,19 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, iSeries_hlock(hpte_group); - slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT); - BUG_ON(lhpte.v & HPTE_V_VALID); + slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT); + if (unlikely(lhpte.v & HPTE_V_VALID)) { + if (vflags & HPTE_V_BOLTED) { + HvCallHpt_setSwBits(slot, 0x10, 0); + HvCallHpt_setPp(slot, PP_RWXX); + iSeries_hunlock(hpte_group); + if (slot < 0) + return 0x8 | (slot & 7); + else + return slot & 7; + } + BUG(); + } if (slot == -1) { /* No available entry found in either group */ iSeries_hunlock(hpte_group); @@ -73,10 +85,9 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, slot &= 0x7fffffffffffffff; } - arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT; - lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; - lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags; + lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID; + lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags; /* Now fill in the actual HPTE */ HvCallHpt_addValidate(slot, secondary, &lhpte); @@ -86,25 +97,6 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va, return (secondary << 3) | (slot & 7); } -long iSeries_hpte_bolt_or_insert(unsigned long hpte_group, - unsigned long va, unsigned long prpn, unsigned long vflags, - unsigned long rflags) -{ - long slot; - hpte_t lhpte; - - slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT); - - if (lhpte.v & HPTE_V_VALID) { - /* Bolt the existing HPTE */ - HvCallHpt_setSwBits(slot, 0x10, 0); - HvCallHpt_setPp(slot, PP_RWXX); - return 0; - } - - return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags); -} - static unsigned long iSeries_hpte_getword0(unsigned long slot) { hpte_t hpte; @@ -150,15 +142,17 @@ static long iSeries_hpte_remove(unsigned long hpte_group) * bits 61..63 : PP2,PP1,PP0 */ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, int large, int local) + unsigned long va, int psize, int local) { hpte_t hpte; - unsigned long avpn = va >> 23; + unsigned long want_v; iSeries_hlock(slot); HvCallHpt_get(&hpte, slot); - if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) { + want_v = hpte_encode_v(va, MMU_PAGE_4K); + + if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) { /* * Hypervisor expects bits as NPPP, which is * different from how they are mapped in our PP. @@ -210,14 +204,17 @@ static long iSeries_hpte_find(unsigned long vpn) * * No need to lock here because we should be the only user. 
*/ -static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) +static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, + int psize) { unsigned long vsid,va,vpn; long slot; + BUG_ON(psize != MMU_PAGE_4K); + vsid = get_kernel_vsid(ea); va = (vsid << 28) | (ea & 0x0fffffff); - vpn = va >> PAGE_SHIFT; + vpn = va >> HW_PAGE_SHIFT; slot = iSeries_hpte_find(vpn); if (slot == -1) panic("updateboltedpp: Could not find page to bolt\n"); @@ -225,7 +222,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea) } static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va, - int large, int local) + int psize, int local) { unsigned long hpte_v; unsigned long avpn = va >> 23; diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c index f61e2e9ac9e..f476d71194f 100644 --- a/arch/powerpc/platforms/iseries/hvlog.c +++ b/arch/powerpc/platforms/iseries/hvlog.c @@ -9,9 +9,9 @@ #include <asm/page.h> #include <asm/abs_addr.h> -#include <asm/iSeries/HvCall.h> -#include <asm/iSeries/HvCallSc.h> -#include <asm/iSeries/HvTypes.h> +#include <asm/iseries/hv_call.h> +#include <asm/iseries/hv_call_sc.h> +#include <asm/iseries/hv_types.h> void HvCall_writeLogBuffer(const void *buffer, u64 len) @@ -22,7 +22,7 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len) while (len) { hv_buf.addr = cur; - left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur; + left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur; if (left_this_page > len) left_this_page = len; hv_buf.len = left_this_page; @@ -30,6 +30,6 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len) HvCall2(HvCallBaseWriteLogBuffer, virt_to_abs(&hv_buf), left_this_page); - cur = (cur & PAGE_MASK) + PAGE_SIZE; + cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE; } } diff --git a/arch/powerpc/platforms/iseries/hvlpconfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c index dc28621aea0..663a1affb4b 100644 --- a/arch/powerpc/platforms/iseries/hvlpconfig.c +++ b/arch/powerpc/platforms/iseries/hvlpconfig.c @@ -17,7 +17,7 @@ */ #include <linux/module.h> -#include <asm/iSeries/HvLpConfig.h> +#include <asm/iseries/hv_lp_config.h> HvLpIndex HvLpConfig_getLpIndex_outline(void) { diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c index 1db26d8be64..bf081b34582 100644 --- a/arch/powerpc/platforms/iseries/iommu.c +++ b/arch/powerpc/platforms/iseries/iommu.c @@ -32,7 +32,7 @@ #include <asm/machdep.h> #include <asm/abs_addr.h> #include <asm/pci-bridge.h> -#include <asm/iSeries/HvCallXm.h> +#include <asm/iseries/hv_call_xm.h> extern struct list_head iSeries_Global_Device_List; @@ -43,9 +43,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, u64 rc; union tce_entry tce; + index <<= TCE_PAGE_FACTOR; + npages <<= TCE_PAGE_FACTOR; + while (npages--) { tce.te_word = 0; - tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT; + tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT; if (tbl->it_type == TCE_VB) { /* Virtual Bus */ @@ -66,7 +69,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n", rc); index++; - uaddr += PAGE_SIZE; + uaddr += TCE_PAGE_SIZE; } } @@ -74,6 +77,9 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) { u64 rc; + npages <<= TCE_PAGE_FACTOR; + index <<= TCE_PAGE_FACTOR; + while (npages--) { rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); if (rc) 
@@ -83,27 +89,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) } } -#ifdef CONFIG_PCI -/* - * This function compares the known tables to find an iommu_table - * that has already been built for hardware TCEs. - */ -static struct iommu_table *iommu_table_find(struct iommu_table * tbl) -{ - struct pci_dn *pdn; - - list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) { - struct iommu_table *it = pdn->iommu_table; - if ((it != NULL) && - (it->it_type == TCE_PCI) && - (it->it_offset == tbl->it_offset) && - (it->it_index == tbl->it_index) && - (it->it_size == tbl->it_size)) - return it; - } - return NULL; -} - /* * Call Hv with the architected data structure to get TCE table info. * info. Put the returned data into the Linux representation of the @@ -113,8 +98,10 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl) * 2. TCE table per Bus. * 3. TCE Table per IOA. */ -static void iommu_table_getparms(struct pci_dn *pdn, - struct iommu_table* tbl) +void iommu_table_getparms_iSeries(unsigned long busno, + unsigned char slotno, + unsigned char virtbus, + struct iommu_table* tbl) { struct iommu_table_cb *parms; @@ -124,9 +111,9 @@ static void iommu_table_getparms(struct pci_dn *pdn, memset(parms, 0, sizeof(*parms)); - parms->itc_busno = pdn->busno; - parms->itc_slotno = pdn->LogicalSlot; - parms->itc_virtbus = 0; + parms->itc_busno = busno; + parms->itc_slotno = slotno; + parms->itc_virtbus = virtbus; HvCallXm_getTceTableParms(iseries_hv_addr(parms)); @@ -134,17 +121,40 @@ static void iommu_table_getparms(struct pci_dn *pdn, panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); /* itc_size is in pages worth of table, it_size is in # of entries */ - tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry); + tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) / + sizeof(union tce_entry)) >> TCE_PAGE_FACTOR; tbl->it_busno = parms->itc_busno; - tbl->it_offset = parms->itc_offset; + tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR; tbl->it_index = parms->itc_index; tbl->it_blocksize = 1; - tbl->it_type = TCE_PCI; + tbl->it_type = virtbus ? TCE_VB : TCE_PCI; kfree(parms); } +#ifdef CONFIG_PCI +/* + * This function compares the known tables to find an iommu_table + * that has already been built for hardware TCEs. 
+ */ +static struct iommu_table *iommu_table_find(struct iommu_table * tbl) +{ + struct pci_dn *pdn; + + list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) { + struct iommu_table *it = pdn->iommu_table; + if ((it != NULL) && + (it->it_type == TCE_PCI) && + (it->it_offset == tbl->it_offset) && + (it->it_index == tbl->it_index) && + (it->it_size == tbl->it_size)) + return it; + } + return NULL; +} + + void iommu_devnode_init_iSeries(struct device_node *dn) { struct iommu_table *tbl; @@ -152,7 +162,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn) tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); - iommu_table_getparms(pdn, tbl); + iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl); /* Look for existing tce table */ pdn->iommu_table = iommu_table_find(tbl); diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 937ac99b9d3..c1135912cc0 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -36,9 +36,9 @@ #include <linux/spinlock.h> #include <asm/ppcdebug.h> -#include <asm/iSeries/HvTypes.h> -#include <asm/iSeries/HvLpEvent.h> -#include <asm/iSeries/HvCallXm.h> +#include <asm/iseries/hv_types.h> +#include <asm/iseries/hv_lp_event.h> +#include <asm/iseries/hv_call_xm.h> #include "irq.h" #include "call_pci.h" diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c index f271b353972..a2200842f4e 100644 --- a/arch/powerpc/platforms/iseries/ksyms.c +++ b/arch/powerpc/platforms/iseries/ksyms.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <asm/hw_irq.h> -#include <asm/iSeries/HvCallSc.h> +#include <asm/iseries/hv_call_sc.h> EXPORT_SYMBOL(HvCall0); EXPORT_SYMBOL(HvCall1); diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c index ed2ffee6f73..bb8c91537f3 100644 --- a/arch/powerpc/platforms/iseries/lpardata.c +++ b/arch/powerpc/platforms/iseries/lpardata.c @@ -13,16 +13,16 @@ #include <linux/bitops.h> #include <asm/processor.h> #include <asm/ptrace.h> -#include <asm/naca.h> #include <asm/abs_addr.h> -#include <asm/iSeries/ItLpNaca.h> +#include <asm/iseries/it_lp_naca.h> #include <asm/lppaca.h> -#include <asm/iSeries/ItLpRegSave.h> +#include <asm/iseries/it_lp_reg_save.h> #include <asm/paca.h> -#include <asm/iSeries/LparMap.h> -#include <asm/iSeries/ItExtVpdPanel.h> -#include <asm/iSeries/ItLpQueue.h> +#include <asm/iseries/lpar_map.h> +#include <asm/iseries/it_exp_vpd_panel.h> +#include <asm/iseries/it_lp_queue.h> +#include "naca.h" #include "vpd_areas.h" #include "spcomm_area.h" #include "ipl_parms.h" diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c index 54c7753dbe0..e9fb98bf895 100644 --- a/arch/powerpc/platforms/iseries/lpevents.c +++ b/arch/powerpc/platforms/iseries/lpevents.c @@ -17,10 +17,10 @@ #include <asm/system.h> #include <asm/paca.h> -#include <asm/iSeries/ItLpQueue.h> -#include <asm/iSeries/HvLpEvent.h> -#include <asm/iSeries/HvCallEvent.h> -#include <asm/iSeries/ItLpNaca.h> +#include <asm/iseries/it_lp_queue.h> +#include <asm/iseries/hv_lp_event.h> +#include <asm/iseries/hv_call_event.h> +#include <asm/iseries/it_lp_naca.h> /* * The LpQueue is used to pass event data from the hypervisor to diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index e5de31aa001..49e7e4b8584 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c @@ -38,10 +38,10 @@ #include 
<asm/uaccess.h> #include <asm/paca.h> #include <asm/abs_addr.h> -#include <asm/iSeries/vio.h> -#include <asm/iSeries/mf.h> -#include <asm/iSeries/HvLpConfig.h> -#include <asm/iSeries/ItLpQueue.h> +#include <asm/iseries/vio.h> +#include <asm/iseries/mf.h> +#include <asm/iseries/hv_lp_config.h> +#include <asm/iseries/it_lp_queue.h> #include "setup.h" diff --git a/arch/powerpc/platforms/iseries/naca.h b/arch/powerpc/platforms/iseries/naca.h new file mode 100644 index 00000000000..ab2372eb8d2 --- /dev/null +++ b/arch/powerpc/platforms/iseries/naca.h @@ -0,0 +1,24 @@ +#ifndef _PLATFORMS_ISERIES_NACA_H +#define _PLATFORMS_ISERIES_NACA_H + +/* + * c 2001 PPC 64 Team, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <asm/types.h> + +struct naca_struct { + /* Kernel only data - undefined for user space */ + void *xItVpdAreas; /* VPD Data 0x00 */ + void *xRamDisk; /* iSeries ramdisk 0x08 */ + u64 xRamDiskSize; /* In pages 0x10 */ +}; + +extern struct naca_struct naca; + +#endif /* _PLATFORMS_ISERIES_NACA_H */ diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c index 959e59fd9c1..7d7d5884343 100644 --- a/arch/powerpc/platforms/iseries/pci.c +++ b/arch/powerpc/platforms/iseries/pci.c @@ -36,8 +36,8 @@ #include <asm/iommu.h> #include <asm/abs_addr.h> -#include <asm/iSeries/HvCallXm.h> -#include <asm/iSeries/mf.h> +#include <asm/iseries/hv_call_xm.h> +#include <asm/iseries/mf.h> #include <asm/ppc-pci.h> diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c index 6f1929cac66..e68b6b5fa89 100644 --- a/arch/powerpc/platforms/iseries/proc.c +++ b/arch/powerpc/platforms/iseries/proc.c @@ -24,8 +24,8 @@ #include <asm/processor.h> #include <asm/time.h> #include <asm/lppaca.h> -#include <asm/iSeries/ItLpQueue.h> -#include <asm/iSeries/HvCallXm.h> +#include <asm/iseries/it_lp_queue.h> +#include <asm/iseries/hv_call_xm.h> #include "processor_vpd.h" #include "main_store.h" diff --git a/arch/powerpc/platforms/iseries/release_data.h b/arch/powerpc/platforms/iseries/release_data.h index c68b9c3e5ca..66189fd2e32 100644 --- a/arch/powerpc/platforms/iseries/release_data.h +++ b/arch/powerpc/platforms/iseries/release_data.h @@ -24,7 +24,7 @@ * address of the OS's NACA). 
*/ #include <asm/types.h> -#include <asm/naca.h> +#include "naca.h" /* * When we IPL a secondary partition, we will check if if the diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index 1544c6f10a3..c5207064977 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -27,6 +27,7 @@ #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/root_dev.h> +#include <linux/kernel.h> #include <asm/processor.h> #include <asm/machdep.h> @@ -40,19 +41,19 @@ #include <asm/firmware.h> #include <asm/time.h> -#include <asm/naca.h> #include <asm/paca.h> #include <asm/cache.h> #include <asm/sections.h> #include <asm/abs_addr.h> -#include <asm/iSeries/HvLpConfig.h> -#include <asm/iSeries/HvCallEvent.h> -#include <asm/iSeries/HvCallXm.h> -#include <asm/iSeries/ItLpQueue.h> -#include <asm/iSeries/mf.h> -#include <asm/iSeries/HvLpEvent.h> -#include <asm/iSeries/LparMap.h> - +#include <asm/iseries/hv_lp_config.h> +#include <asm/iseries/hv_call_event.h> +#include <asm/iseries/hv_call_xm.h> +#include <asm/iseries/it_lp_queue.h> +#include <asm/iseries/mf.h> +#include <asm/iseries/hv_lp_event.h> +#include <asm/iseries/lpar_map.h> + +#include "naca.h" #include "setup.h" #include "irq.h" #include "vpd_areas.h" @@ -94,6 +95,8 @@ extern unsigned long iSeries_recal_titan; static int mf_initialized; +static unsigned long cmd_mem_limit; + struct MemoryBlock { unsigned long absStart; unsigned long absEnd; @@ -317,11 +320,11 @@ static void __init iSeries_init_early(void) */ if (naca.xRamDisk) { initrd_start = (unsigned long)__va(naca.xRamDisk); - initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE; + initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE; initrd_below_start_ok = 1; // ramdisk in kernel space ROOT_DEV = Root_RAM0; - if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize) - rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024; + if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize) + rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024; } else #endif /* CONFIG_BLK_DEV_INITRD */ { @@ -341,23 +344,6 @@ static void __init iSeries_init_early(void) */ iommu_init_early_iSeries(); - iSeries_get_cmdline(); - - /* Save unparsed command line copy for /proc/cmdline */ - strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); - - /* Parse early parameters, in particular mem=x */ - parse_early_param(); - - if (memory_limit) { - if (memory_limit < systemcfg->physicalMemorySize) - systemcfg->physicalMemorySize = memory_limit; - else { - printk("Ignoring mem=%lu >= ram_top.\n", memory_limit); - memory_limit = 0; - } - } - /* Initialize machine-dependency vectors */ #ifdef CONFIG_SMP smp_init_iSeries(); @@ -484,13 +470,14 @@ static void __init build_iSeries_Memory_Map(void) */ hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); hptSizePages = (u32)HvCallHpt_getHptPages(); - hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT); + hptSizeChunks = hptSizePages >> + (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT); hptLastChunk = hptFirstChunk + hptSizeChunks - 1; printk("HPT absolute addr = %016lx, size = %dK\n", chunk_to_addr(hptFirstChunk), hptSizeChunks * 256); - ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE); + ppc64_pft_size = __ilog2(hptSizePages * HW_PAGE_SIZE); /* * The actual hashed page table is in the hypervisor, @@ -643,7 +630,7 @@ static void __init iSeries_fixup_klimit(void) */ if (naca.xRamDisk) klimit = KERNELBASE + (u64)naca.xRamDisk + - (naca.xRamDiskSize * PAGE_SIZE); + 
(naca.xRamDiskSize * HW_PAGE_SIZE); else { /* * No ram disk was included - check and see if there @@ -971,6 +958,8 @@ void build_flat_dt(struct iseries_flat_dt *dt) /* /chosen */ dt_start_node(dt, "chosen"); dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR); + if (cmd_mem_limit) + dt_prop_u64(dt, "linux,memory-limit", cmd_mem_limit); dt_end_node(dt); dt_cpus(dt); @@ -990,7 +979,27 @@ void * __init iSeries_early_setup(void) */ build_iSeries_Memory_Map(); + iSeries_get_cmdline(); + + /* Save unparsed command line copy for /proc/cmdline */ + strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); + + /* Parse early parameters, in particular mem=x */ + parse_early_param(); + build_flat_dt(&iseries_dt); return (void *) __pa(&iseries_dt); } + +/* + * On iSeries we just parse the mem=X option from the command line. + * On pSeries it's a bit more complicated, see prom_init_mem() + */ +static int __init early_parsemem(char *p) +{ + if (p) + cmd_mem_limit = ALIGN(memparse(p, &p), PAGE_SIZE); + return 0; +} +early_param("mem", early_parsemem); diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c index f720916682f..3336bad6772 100644 --- a/arch/powerpc/platforms/iseries/smp.c +++ b/arch/powerpc/platforms/iseries/smp.c @@ -38,7 +38,7 @@ #include <asm/io.h> #include <asm/smp.h> #include <asm/paca.h> -#include <asm/iSeries/HvCall.h> +#include <asm/iseries/hv_call.h> #include <asm/time.h> #include <asm/ppcdebug.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c index c0f7d2e9153..384360ee06e 100644 --- a/arch/powerpc/platforms/iseries/vio.c +++ b/arch/powerpc/platforms/iseries/vio.c @@ -17,10 +17,10 @@ #include <asm/tce.h> #include <asm/abs_addr.h> #include <asm/page.h> -#include <asm/iSeries/vio.h> -#include <asm/iSeries/HvTypes.h> -#include <asm/iSeries/HvLpConfig.h> -#include <asm/iSeries/HvCallXm.h> +#include <asm/iseries/vio.h> +#include <asm/iseries/hv_types.h> +#include <asm/iseries/hv_lp_config.h> +#include <asm/iseries/hv_call_xm.h> struct device *iSeries_vio_dev = &vio_bus_device.dev; EXPORT_SYMBOL(iSeries_vio_dev); @@ -30,41 +30,14 @@ static struct iommu_table vio_iommu_table; static void __init iommu_vio_init(void) { - struct iommu_table *t; - struct iommu_table_cb cb; - unsigned long cbp; - unsigned long itc_entries; + iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table); + veth_iommu_table.it_size /= 2; + vio_iommu_table = veth_iommu_table; + vio_iommu_table.it_offset += veth_iommu_table.it_size; - cb.itc_busno = 255; /* Bus 255 is the virtual bus */ - cb.itc_virtbus = 0xff; /* Ask for virtual bus */ - - cbp = virt_to_abs(&cb); - HvCallXm_getTceTableParms(cbp); - - itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry); - veth_iommu_table.it_size = itc_entries / 2; - veth_iommu_table.it_busno = cb.itc_busno; - veth_iommu_table.it_offset = cb.itc_offset; - veth_iommu_table.it_index = cb.itc_index; - veth_iommu_table.it_type = TCE_VB; - veth_iommu_table.it_blocksize = 1; - - t = iommu_init_table(&veth_iommu_table); - - if (!t) + if (!iommu_init_table(&veth_iommu_table)) printk("Virtual Bus VETH TCE table failed.\n"); - - vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size; - vio_iommu_table.it_busno = cb.itc_busno; - vio_iommu_table.it_offset = cb.itc_offset + - veth_iommu_table.it_size; - vio_iommu_table.it_index = cb.itc_index; - vio_iommu_table.it_type = TCE_VB; - vio_iommu_table.it_blocksize = 1; - - t = iommu_init_table(&vio_iommu_table); - - if 
(!t) + if (!iommu_init_table(&vio_iommu_table)) printk("Virtual Bus VIO TCE table failed.\n"); } diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index c0c767bd37f..84267269559 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c @@ -41,12 +41,12 @@ #include <asm/system.h> #include <asm/uaccess.h> -#include <asm/iSeries/HvTypes.h> -#include <asm/iSeries/ItExtVpdPanel.h> -#include <asm/iSeries/HvLpEvent.h> -#include <asm/iSeries/HvLpConfig.h> -#include <asm/iSeries/mf.h> -#include <asm/iSeries/vio.h> +#include <asm/iseries/hv_types.h> +#include <asm/iseries/it_exp_vpd_panel.h> +#include <asm/iseries/hv_lp_event.h> +#include <asm/iseries/hv_lp_config.h> +#include <asm/iseries/mf.h> +#include <asm/iseries/vio.h> /* Status of the path to each other partition in the system. * This is overkill, since we will only ever establish connections @@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(statuslock); * For each kind of event we allocate a buffer that is * guaranteed not to cross a page boundary */ -static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned; +static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] + __attribute__((__aligned__(4096))); static atomic_t event_buffer_available[VIO_MAX_SUBTYPES]; static int event_buffer_initialised; @@ -116,12 +117,12 @@ static int proc_viopath_show(struct seq_file *m, void *v) HvLpEvent_Rc hvrc; DECLARE_MUTEX_LOCKED(Semaphore); - buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL); if (!buf) return 0; - memset(buf, 0, PAGE_SIZE); + memset(buf, 0, HW_PAGE_SIZE); - handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE, + handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE, DMA_FROM_DEVICE); hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, @@ -131,7 +132,7 @@ static int proc_viopath_show(struct seq_file *m, void *v) viopath_sourceinst(viopath_hostLp), viopath_targetinst(viopath_hostLp), (u64)(unsigned long)&Semaphore, VIOVERSION << 16, - ((u64)handle) << 32, PAGE_SIZE, 0, 0); + ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0); if (hvrc != HvLpEvent_Rc_Good) printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc); @@ -140,7 +141,7 @@ static int proc_viopath_show(struct seq_file *m, void *v) vlanMap = HvLpConfig_getVirtualLanIndexMap(); - buf[PAGE_SIZE-1] = '\0'; + buf[HW_PAGE_SIZE-1] = '\0'; seq_printf(m, "%s", buf); seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap); seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n", @@ -152,7 +153,8 @@ static int proc_viopath_show(struct seq_file *m, void *v) e2a(xItExtVpdPanel.systemSerial[4]), e2a(xItExtVpdPanel.systemSerial[5])); - dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE, + DMA_FROM_DEVICE); kfree(buf); return 0; diff --git a/arch/powerpc/platforms/iseries/vpdinfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c index 9c318849dee..23a6d1e5b42 100644 --- a/arch/powerpc/platforms/iseries/vpdinfo.c +++ b/arch/powerpc/platforms/iseries/vpdinfo.c @@ -32,7 +32,7 @@ #include <asm/resource.h> #include <asm/abs_addr.h> #include <asm/pci-bridge.h> -#include <asm/iSeries/HvTypes.h> +#include <asm/iseries/hv_types.h> #include "pci.h" #include "call_pci.h" diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 0037a8c8c81..83a49e80ac2 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -576,7 +576,7 @@ void __init pmac_pic_init(void) 
#endif /* CONFIG_PPC32 */ } -#ifdef CONFIG_PM +#if defined(CONFIG_PM) && defined(CONFIG_PPC32) /* * These procedures are used in implementing sleep on the powerbooks. * sleep_save_intrs() saves the states of all interrupt enables @@ -643,7 +643,7 @@ static int pmacpic_resume(struct sys_device *sysdev) return 0; } -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM && CONFIG_PPC32 */ static struct sysdev_class pmacpic_sysclass = { set_kset_name("pmac_pic"), @@ -655,10 +655,10 @@ static struct sys_device device_pmacpic = { }; static struct sysdev_driver driver_pmacpic = { -#ifdef CONFIG_PM +#if defined(CONFIG_PM) && defined(CONFIG_PPC32) .suspend = &pmacpic_suspend, .resume = &pmacpic_resume, -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM && CONFIG_PPC32 */ }; static int __init init_pmacpic_sysfs(void) diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 6f62af59729..80b58c1ec41 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -75,6 +75,7 @@ #include <asm/smu.h> #include <asm/pmc.h> #include <asm/mpic.h> +#include <asm/lmb.h> #include "pmac.h" @@ -350,7 +351,7 @@ void __init pmac_setup_arch(void) find_via_pmu(); smu_init(); -#ifdef CONFIG_NVRAM +#if defined(CONFIG_NVRAM) || defined(CONFIG_PPC64) pmac_nvram_init(); #endif diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 2d57f588151..e3fc3407bb1 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -21,15 +21,6 @@ config EEH depends on PPC_PSERIES default y if !EMBEDDED -config RTAS_PROC - bool "Proc interface to RTAS" - depends on PPC_RTAS - default y - -config RTAS_FLASH - tristate "Firmware flash interface" - depends on PPC64 && RTAS_PROC - config SCANLOG tristate "Scanlog dump interface" depends on RTAS_PROC && PPC_PSERIES diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 5ef494e3a70..b9938fece78 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -1,5 +1,5 @@ obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \ - setup.o iommu.o rtas-fw.o ras.o + setup.o iommu.o ras.o rtasd.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_IBMVIO) += vio.o obj-$(CONFIG_XICS) += xics.o diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 9e90d41131d..513e2723149 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -42,13 +42,14 @@ #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/abs_addr.h> -#include <asm/plpar_wrappers.h> #include <asm/pSeries_reconfig.h> #include <asm/systemcfg.h> #include <asm/firmware.h> #include <asm/tce.h> #include <asm/ppc-pci.h> +#include "plpar_wrappers.h" + #define DBG(fmt...) 
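Several files touched here share the compile-time debug idiom seen at the end of the hunk above: DBG() (and DBG_LOW() in lpar.c further down) expands to udbg_printf() when the switch is defined and to an empty do { } while (0) otherwise, so a call site behaves like a single statement wherever it appears. A standalone illustration, using C99 variadic macros instead of the kernel's GCC-style fmt... form:

#include <stdio.h>

/* Set to 1 to compile the verbose path in, mirroring DEBUG/DEBUG_LOW. */
#define MY_DEBUG 0

#if MY_DEBUG
#define DBG(fmt, ...)	printf(fmt, __VA_ARGS__)
#else
#define DBG(fmt, ...)	do { } while (0)
#endif

int main(void)
{
	int rc = 42;

	if (rc)
		DBG("debug: rc=%d\n", rc);	/* compiled out when MY_DEBUG is 0 */
	else
		printf("rc was zero\n");

	printf("normal output, rc=%d\n", rc);
	return 0;
}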
extern int is_python(struct device_node *); @@ -498,7 +499,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti switch (action) { case PSERIES_RECONFIG_REMOVE: - if (pci->iommu_table && + if (pci && pci->iommu_table && get_property(np, "ibm,dma-window", NULL)) iommu_free_table(np); break; diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 268d8362dde..ab0c6dd6ec9 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -19,7 +19,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define DEBUG +#undef DEBUG_LOW #include <linux/config.h> #include <linux/kernel.h> @@ -38,12 +38,13 @@ #include <asm/prom.h> #include <asm/abs_addr.h> #include <asm/cputable.h> -#include <asm/plpar_wrappers.h> -#ifdef DEBUG -#define DBG(fmt...) udbg_printf(fmt) +#include "plpar_wrappers.h" + +#ifdef DEBUG_LOW +#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while(0) #else -#define DBG(fmt...) +#define DBG_LOW(fmt...) do { } while(0) #endif /* in pSeries_hvCall.S */ @@ -260,27 +261,24 @@ out: void vpa_init(int cpu) { int hwcpu = get_hard_smp_processor_id(cpu); - unsigned long vpa = (unsigned long)&(paca[cpu].lppaca); + unsigned long vpa = __pa(&paca[cpu].lppaca); long ret; - unsigned long flags; - - /* Register the Virtual Processor Area (VPA) */ - flags = 1UL << (63 - 18); if (cpu_has_feature(CPU_FTR_ALTIVEC)) paca[cpu].lppaca.vmxregs_in_use = 1; - ret = register_vpa(flags, hwcpu, __pa(vpa)); + ret = register_vpa(hwcpu, vpa); if (ret) printk(KERN_ERR "WARNING: vpa_init: VPA registration for " "cpu %d (hw %d) of area %lx returns %ld\n", - cpu, hwcpu, __pa(vpa), ret); + cpu, hwcpu, vpa, ret); } long pSeries_lpar_hpte_insert(unsigned long hpte_group, - unsigned long va, unsigned long prpn, - unsigned long vflags, unsigned long rflags) + unsigned long va, unsigned long pa, + unsigned long rflags, unsigned long vflags, + int psize) { unsigned long lpar_rc; unsigned long flags; @@ -288,11 +286,28 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long hpte_v, hpte_r; unsigned long dummy0, dummy1; - hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID; - if (vflags & HPTE_V_LARGE) - hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); - - hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags; + if (!(vflags & HPTE_V_BOLTED)) + DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " + "rflags=%lx, vflags=%lx, psize=%d)\n", + hpte_group, va, pa, rflags, vflags, psize); + + hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID; + hpte_r = hpte_encode_r(pa, psize) | rflags; + + if (!(vflags & HPTE_V_BOLTED)) + DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); + +#if 1 + { + int i; + for (i=0;i<8;i++) { + unsigned long w0, w1; + plpar_pte_read(0, hpte_group, &w0, &w1); + BUG_ON (HPTE_V_COMPARE(hpte_v, w0) + && (w0 & HPTE_V_VALID)); + } + } +#endif /* Now fill in the actual HPTE */ /* Set CEC cookie to 0 */ @@ -302,23 +317,30 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, /* Exact = 0 */ flags = 0; - /* XXX why is this here? 
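A sketch only, not from the patch: the temporary "#if 1" sanity check inside pSeries_lpar_hpte_insert() above, factored into a helper. The quoted loop passes hpte_group to plpar_pte_read() on every iteration and so re-reads the first slot; indexing each of the eight slots, as the remove path does with hpte_group + slot_offset, is assumed here to be the intent.

/* Trap if an HPTE matching hpte_v is already valid in the group. */
static void check_group_for_duplicate(unsigned long hpte_group,
				      unsigned long hpte_v)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long w0, w1;

		plpar_pte_read(0, hpte_group + i, &w0, &w1);
		BUG_ON(HPTE_V_COMPARE(hpte_v, w0) && (w0 & HPTE_V_VALID));
	}
}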
- Anton */ + /* Make pHyp happy */ if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE)) hpte_r &= ~_PAGE_COHERENT; lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v, hpte_r, &slot, &dummy0, &dummy1); - - if (unlikely(lpar_rc == H_PTEG_Full)) + if (unlikely(lpar_rc == H_PTEG_Full)) { + if (!(vflags & HPTE_V_BOLTED)) + DBG_LOW(" full\n"); return -1; + } /* * Since we try and ioremap PHBs we don't own, the pte insert * will fail. However we must catch the failure in hash_page * or we will loop forever, so return -2 in this case. */ - if (unlikely(lpar_rc != H_Success)) + if (unlikely(lpar_rc != H_Success)) { + if (!(vflags & HPTE_V_BOLTED)) + DBG_LOW(" lpar err %d\n", lpar_rc); return -2; + } + if (!(vflags & HPTE_V_BOLTED)) + DBG_LOW(" -> slot: %d\n", slot & 7); /* Because of iSeries, we have to pass down the secondary * bucket bit here as well @@ -343,10 +365,8 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) /* don't remove a bolted entry */ lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, (0x1UL << 4), &dummy1, &dummy2); - if (lpar_rc == H_Success) return i; - BUG_ON(lpar_rc != H_Not_Found); slot_offset++; @@ -374,20 +394,28 @@ static void pSeries_lpar_hptab_clear(void) * We can probably optimize here and assume the high bits of newpp are * already zero. For now I am paranoid. */ -static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, int large, int local) +static long pSeries_lpar_hpte_updatepp(unsigned long slot, + unsigned long newpp, + unsigned long va, + int psize, int local) { unsigned long lpar_rc; unsigned long flags = (newpp & 7) | H_AVPN; - unsigned long avpn = va >> 23; + unsigned long want_v; - if (large) - avpn &= ~0x1UL; + want_v = hpte_encode_v(va, psize); - lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7)); + DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... 
", + want_v & HPTE_V_AVPN, slot, flags, psize); - if (lpar_rc == H_Not_Found) + lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN); + + if (lpar_rc == H_Not_Found) { + DBG_LOW("not found !\n"); return -1; + } + + DBG_LOW("ok\n"); BUG_ON(lpar_rc != H_Success); @@ -413,21 +441,22 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) return dword0; } -static long pSeries_lpar_hpte_find(unsigned long vpn) +static long pSeries_lpar_hpte_find(unsigned long va, int psize) { unsigned long hash; unsigned long i, j; long slot; - unsigned long hpte_v; + unsigned long want_v, hpte_v; - hash = hpt_hash(vpn, 0); + hash = hpt_hash(va, mmu_psize_defs[psize].shift); + want_v = hpte_encode_v(va, psize); for (j = 0; j < 2; j++) { slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; for (i = 0; i < HPTES_PER_GROUP; i++) { hpte_v = pSeries_lpar_hpte_getword0(slot); - if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11)) + if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID) && (!!(hpte_v & HPTE_V_SECONDARY) == j)) { /* HPTE matches */ @@ -444,17 +473,15 @@ static long pSeries_lpar_hpte_find(unsigned long vpn) } static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, - unsigned long ea) + unsigned long ea, + int psize) { - unsigned long lpar_rc; - unsigned long vsid, va, vpn, flags; - long slot; + unsigned long lpar_rc, slot, vsid, va, flags; vsid = get_kernel_vsid(ea); va = (vsid << 28) | (ea & 0x0fffffff); - vpn = va >> PAGE_SHIFT; - slot = pSeries_lpar_hpte_find(vpn); + slot = pSeries_lpar_hpte_find(va, psize); BUG_ON(slot == -1); flags = newpp & 7; @@ -464,18 +491,18 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, } static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, - int large, int local) + int psize, int local) { - unsigned long avpn = va >> 23; + unsigned long want_v; unsigned long lpar_rc; unsigned long dummy1, dummy2; - if (large) - avpn &= ~0x1UL; - - lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1, - &dummy2); + DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d", + slot, va, psize, local); + want_v = hpte_encode_v(va, psize); + lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN, + &dummy1, &dummy2); if (lpar_rc == H_Not_Found) return; @@ -497,7 +524,8 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local) spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); for (i = 0; i < number; i++) - flush_hash_page(batch->vaddr[i], batch->pte[i], local); + flush_hash_page(batch->vaddr[i], batch->pte[i], + batch->psize, local); if (lock_tlbie) spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h new file mode 100644 index 00000000000..382f8c5b0e7 --- /dev/null +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -0,0 +1,120 @@ +#ifndef _PSERIES_PLPAR_WRAPPERS_H +#define _PSERIES_PLPAR_WRAPPERS_H + +#include <asm/hvcall.h> + +static inline long poll_pending(void) +{ + unsigned long dummy; + return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0, &dummy, &dummy, &dummy); +} + +static inline long prod_processor(void) +{ + plpar_hcall_norets(H_PROD); + return 0; +} + +static inline long cede_processor(void) +{ + plpar_hcall_norets(H_CEDE); + return 0; +} + +static inline long vpa_call(unsigned long flags, unsigned long cpu, + unsigned long vpa) +{ + /* flags are in bits 16-18 (counting from most significant bit) */ + flags = flags << (63 - 18); + + return 
plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa); +} + +static inline long unregister_vpa(unsigned long cpu, unsigned long vpa) +{ + return vpa_call(0x5, cpu, vpa); +} + +static inline long register_vpa(unsigned long cpu, unsigned long vpa) +{ + return vpa_call(0x1, cpu, vpa); +} + +extern void vpa_init(int cpu); + +static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex, + unsigned long avpn, unsigned long *old_pteh_ret, + unsigned long *old_ptel_ret) +{ + unsigned long dummy; + return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0, old_pteh_ret, + old_ptel_ret, &dummy); +} + +static inline long plpar_pte_read(unsigned long flags, unsigned long ptex, + unsigned long *old_pteh_ret, unsigned long *old_ptel_ret) +{ + unsigned long dummy; + return plpar_hcall(H_READ, flags, ptex, 0, 0, old_pteh_ret, + old_ptel_ret, &dummy); +} + +static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex, + unsigned long avpn) +{ + return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn); +} + +static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba, + unsigned long *tce_ret) +{ + unsigned long dummy; + return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0, tce_ret, &dummy, + &dummy); +} + +static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba, + unsigned long tceval) +{ + return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval); +} + +static inline long plpar_tce_put_indirect(unsigned long liobn, + unsigned long ioba, unsigned long page, unsigned long count) +{ + return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count); +} + +static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba, + unsigned long tceval, unsigned long count) +{ + return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count); +} + +static inline long plpar_get_term_char(unsigned long termno, + unsigned long *len_ret, char *buf_ret) +{ + unsigned long *lbuf = (unsigned long *)buf_ret; /* TODO: alignment? */ + return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0, len_ret, + lbuf + 0, lbuf + 1); +} + +static inline long plpar_put_term_char(unsigned long termno, unsigned long len, + const char *buffer) +{ + unsigned long *lbuf = (unsigned long *)buffer; /* TODO: alignment? */ + return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0], + lbuf[1]); +} + +static inline long plpar_set_xdabr(unsigned long address, unsigned long flags) +{ + return plpar_hcall_norets(H_SET_XDABR, address, flags); +} + +static inline long plpar_set_dabr(unsigned long val) +{ + return plpar_hcall_norets(H_SET_DABR, val); +} + +#endif /* _PSERIES_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 58c61219d08..d7d40033945 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -286,10 +286,8 @@ static struct property *new_property(const char *name, const int length, return new; cleanup: - if (new->name) - kfree(new->name); - if (new->value) - kfree(new->value); + kfree(new->name); + kfree(new->value); kfree(new); return NULL; } diff --git a/arch/powerpc/platforms/pseries/rtas-fw.c b/arch/powerpc/platforms/pseries/rtas-fw.c deleted file mode 100644 index 15d81d758ca..00000000000 --- a/arch/powerpc/platforms/pseries/rtas-fw.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * - * Procedures for firmware flash updates on pSeries systems. - * - * Peter Bergner, IBM March 2001. - * Copyright (C) 2001 IBM. 
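Two asides on the hunks above. The new_property() error path in reconfig.c can drop its NULL checks because kfree(NULL) is defined to be a no-op. And the flag placement in vpa_call() is easy to verify by hand; the standalone program below (not part of the patch) prints the values that register_vpa() and unregister_vpa() end up passing to H_REGISTER_VPA, with the 3-bit subfunction landing in bits 16-18 counted from the most significant bit.

#include <stdio.h>

/* Mirror of the shift in vpa_call(): flags go to bits 16-18 from the MSB. */
static unsigned long long vpa_flags(unsigned long long flags)
{
	return flags << (63 - 18);
}

int main(void)
{
	printf("register   (0x1): 0x%016llx\n", vpa_flags(0x1));
	printf("deregister (0x5): 0x%016llx\n", vpa_flags(0x5));
	return 0;
}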
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <stdarg.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/init.h> - -#include <asm/prom.h> -#include <asm/rtas.h> -#include <asm/semaphore.h> -#include <asm/machdep.h> -#include <asm/page.h> -#include <asm/param.h> -#include <asm/system.h> -#include <asm/abs_addr.h> -#include <asm/udbg.h> -#include <asm/delay.h> -#include <asm/uaccess.h> -#include <asm/systemcfg.h> - -#include "rtas-fw.h" - -struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; - -#define FLASH_BLOCK_LIST_VERSION (1UL) - -static void rtas_flash_firmware(void) -{ - unsigned long image_size; - struct flash_block_list *f, *next, *flist; - unsigned long rtas_block_list; - int i, status, update_token; - - update_token = rtas_token("ibm,update-flash-64-and-reboot"); - if (update_token == RTAS_UNKNOWN_SERVICE) { - printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n"); - printk(KERN_ALERT "FLASH: firmware will not be flashed\n"); - return; - } - - /* NOTE: the "first" block list is a global var with no data - * blocks in the kernel data segment. We do this because - * we want to ensure this block_list addr is under 4GB. - */ - rtas_firmware_flash_list.num_blocks = 0; - flist = (struct flash_block_list *)&rtas_firmware_flash_list; - rtas_block_list = virt_to_abs(flist); - if (rtas_block_list >= 4UL*1024*1024*1024) { - printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); - return; - } - - printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n"); - /* Update the block_list in place. */ - image_size = 0; - for (f = flist; f; f = next) { - /* Translate data addrs to absolute */ - for (i = 0; i < f->num_blocks; i++) { - f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data); - image_size += f->blocks[i].length; - } - next = f->next; - /* Don't translate NULL pointer for last entry */ - if (f->next) - f->next = (struct flash_block_list *)virt_to_abs(f->next); - else - f->next = NULL; - /* make num_blocks into the version/length field */ - f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16); - } - - printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); - printk(KERN_ALERT "FLASH: performing flash and reboot\n"); - rtas_progress("Flashing \n", 0x0); - rtas_progress("Please Wait... ", 0x0); - printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n"); - status = rtas_call(update_token, 1, 1, NULL, rtas_block_list); - switch (status) { /* should only get "bad" status */ - case 0: - printk(KERN_ALERT "FLASH: success\n"); - break; - case -1: - printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n"); - break; - case -3: - printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n"); - break; - case -4: - printk(KERN_ALERT "FLASH: flash failed when partially complete. 
System may not reboot\n"); - break; - default: - printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status); - break; - } -} - -void rtas_flash_bypass_warning(void) -{ - printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n"); - printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n"); -} - - -void rtas_fw_restart(char *cmd) -{ - if (rtas_firmware_flash_list.next) - rtas_flash_firmware(); - rtas_restart(cmd); -} - -void rtas_fw_power_off(void) -{ - if (rtas_firmware_flash_list.next) - rtas_flash_bypass_warning(); - rtas_power_off(); -} - -void rtas_fw_halt(void) -{ - if (rtas_firmware_flash_list.next) - rtas_flash_bypass_warning(); - rtas_halt(); -} - -EXPORT_SYMBOL(rtas_firmware_flash_list); diff --git a/arch/powerpc/platforms/pseries/rtas-fw.h b/arch/powerpc/platforms/pseries/rtas-fw.h deleted file mode 100644 index e70fa69974a..00000000000 --- a/arch/powerpc/platforms/pseries/rtas-fw.h +++ /dev/null @@ -1,3 +0,0 @@ -void rtas_fw_restart(char *cmd); -void rtas_fw_power_off(void); -void rtas_fw_halt(void); diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c new file mode 100644 index 00000000000..e26b0420b6d --- /dev/null +++ b/arch/powerpc/platforms/pseries/rtasd.c @@ -0,0 +1,527 @@ +/* + * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Communication to userspace based on kernel/printk.c + */ + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/proc_fs.h> +#include <linux/init.h> +#include <linux/vmalloc.h> +#include <linux/spinlock.h> +#include <linux/cpu.h> +#include <linux/delay.h> + +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/rtas.h> +#include <asm/prom.h> +#include <asm/nvram.h> +#include <asm/atomic.h> +#include <asm/systemcfg.h> + +#if 0 +#define DEBUG(A...) printk(KERN_ERR A) +#else +#define DEBUG(A...) +#endif + +static DEFINE_SPINLOCK(rtasd_log_lock); + +DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait); + +static char *rtas_log_buf; +static unsigned long rtas_log_start; +static unsigned long rtas_log_size; + +static int surveillance_timeout = -1; +static unsigned int rtas_event_scan_rate; +static unsigned int rtas_error_log_max; +static unsigned int rtas_error_log_buffer_max; + +static int full_rtas_msgs = 0; + +extern int no_logging; + +volatile int error_log_cnt = 0; + +/* + * Since we use 32 bit RTAS, the physical address of this must be below + * 4G or else bad things happen. Allocate this in the kernel data and + * make it big enough. 
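The below-4 GB requirement in the comment above is the same constraint the deleted rtas_flash_firmware() enforced on its block-list header: RTAS is entered as a 32-bit interface, so anything handed to it by physical address must sit under 4 GB. A standalone illustration of that bound (not kernel code):

#include <stdio.h>

/* True if a physical address is reachable by 32-bit RTAS. */
static int fits_in_32bit_rtas(unsigned long long phys)
{
	return phys < 4ULL * 1024 * 1024 * 1024;
}

int main(void)
{
	printf("0x00000000fffff000 ok? %d\n", fits_in_32bit_rtas(0xfffff000ULL));
	printf("0x0000000100000000 ok? %d\n", fits_in_32bit_rtas(0x100000000ULL));
	return 0;
}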
+ */ +static unsigned char logdata[RTAS_ERROR_LOG_MAX]; + +static int get_eventscan_parms(void); + +static char *rtas_type[] = { + "Unknown", "Retry", "TCE Error", "Internal Device Failure", + "Timeout", "Data Parity", "Address Parity", "Cache Parity", + "Address Invalid", "ECC Uncorrected", "ECC Corrupted", +}; + +static char *rtas_event_type(int type) +{ + if ((type > 0) && (type < 11)) + return rtas_type[type]; + + switch (type) { + case RTAS_TYPE_EPOW: + return "EPOW"; + case RTAS_TYPE_PLATFORM: + return "Platform Error"; + case RTAS_TYPE_IO: + return "I/O Event"; + case RTAS_TYPE_INFO: + return "Platform Information Event"; + case RTAS_TYPE_DEALLOC: + return "Resource Deallocation Event"; + case RTAS_TYPE_DUMP: + return "Dump Notification Event"; + } + + return rtas_type[0]; +} + +/* To see this info, grep RTAS /var/log/messages and each entry + * will be collected together with obvious begin/end. + * There will be a unique identifier on the begin and end lines. + * This will persist across reboots. + * + * format of error logs returned from RTAS: + * bytes (size) : contents + * -------------------------------------------------------- + * 0-7 (8) : rtas_error_log + * 8-47 (40) : extended info + * 48-51 (4) : vendor id + * 52-1023 (vendor specific) : location code and debug data + */ +static void printk_log_rtas(char *buf, int len) +{ + + int i,j,n = 0; + int perline = 16; + char buffer[64]; + char * str = "RTAS event"; + + if (full_rtas_msgs) { + printk(RTAS_DEBUG "%d -------- %s begin --------\n", + error_log_cnt, str); + + /* + * Print perline bytes on each line, each line will start + * with RTAS and a changing number, so syslogd will + * print lines that are otherwise the same. Separate every + * 4 bytes with a space. + */ + for (i = 0; i < len; i++) { + j = i % perline; + if (j == 0) { + memset(buffer, 0, sizeof(buffer)); + n = sprintf(buffer, "RTAS %d:", i/perline); + } + + if ((i % 4) == 0) + n += sprintf(buffer+n, " "); + + n += sprintf(buffer+n, "%02x", (unsigned char)buf[i]); + + if (j == (perline-1)) + printk(KERN_DEBUG "%s\n", buffer); + } + if ((i % perline) != 0) + printk(KERN_DEBUG "%s\n", buffer); + + printk(RTAS_DEBUG "%d -------- %s end ----------\n", + error_log_cnt, str); + } else { + struct rtas_error_log *errlog = (struct rtas_error_log *)buf; + + printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n", + error_log_cnt, rtas_event_type(errlog->type), + errlog->severity); + } +} + +static int log_rtas_len(char * buf) +{ + int len; + struct rtas_error_log *err; + + /* rtas fixed header */ + len = 8; + err = (struct rtas_error_log *)buf; + if (err->extended_log_length) { + + /* extended header */ + len += err->extended_log_length; + } + + if (rtas_error_log_max == 0) { + get_eventscan_parms(); + } + if (len > rtas_error_log_max) + len = rtas_error_log_max; + + return len; +} + +/* + * First write to nvram, if fatal error, that is the only + * place we log the info. The error will be picked up + * on the next reboot by rtasd. If not fatal, run the + * method for the type of error. Currently, only RTAS + * errors have methods implemented, but in the future + * there might be a need to store data in nvram before a + * call to panic(). + * + * XXX We write to nvram periodically, to indicate error has + * been written and sync'd, but there is a possibility + * that if we don't shutdown correctly, a duplicate error + * record will be created on next reboot. 
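Not part of the patch: a standalone rendering of the hex-dump layout that printk_log_rtas() produces when full_rtas_msgs is set, namely sixteen bytes per line, a space before every four-byte group, and a changing "RTAS <n>:" prefix so syslogd does not fold repeated lines together.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[20];
	char line[64];
	int i, j, n = 0, perline = 16;

	for (i = 0; i < (int)sizeof(buf); i++)
		buf[i] = (unsigned char)i;		/* sample payload */

	for (i = 0; i < (int)sizeof(buf); i++) {
		j = i % perline;
		if (j == 0) {
			memset(line, 0, sizeof(line));
			n = sprintf(line, "RTAS %d:", i / perline);
		}
		if ((i % 4) == 0)
			n += sprintf(line + n, " ");
		n += sprintf(line + n, "%02x", buf[i]);
		if (j == perline - 1)
			printf("%s\n", line);
	}
	if ((i % perline) != 0)
		printf("%s\n", line);			/* flush the partial line */
	return 0;
}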
+ */ +void pSeries_log_error(char *buf, unsigned int err_type, int fatal) +{ + unsigned long offset; + unsigned long s; + int len = 0; + + DEBUG("logging event\n"); + if (buf == NULL) + return; + + spin_lock_irqsave(&rtasd_log_lock, s); + + /* get length and increase count */ + switch (err_type & ERR_TYPE_MASK) { + case ERR_TYPE_RTAS_LOG: + len = log_rtas_len(buf); + if (!(err_type & ERR_FLAG_BOOT)) + error_log_cnt++; + break; + case ERR_TYPE_KERNEL_PANIC: + default: + spin_unlock_irqrestore(&rtasd_log_lock, s); + return; + } + + /* Write error to NVRAM */ + if (!no_logging && !(err_type & ERR_FLAG_BOOT)) + nvram_write_error_log(buf, len, err_type); + + /* + * rtas errors can occur during boot, and we do want to capture + * those somewhere, even if nvram isn't ready (why not?), and even + * if rtasd isn't ready. Put them into the boot log, at least. + */ + if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG) + printk_log_rtas(buf, len); + + /* Check to see if we need to or have stopped logging */ + if (fatal || no_logging) { + no_logging = 1; + spin_unlock_irqrestore(&rtasd_log_lock, s); + return; + } + + /* call type specific method for error */ + switch (err_type & ERR_TYPE_MASK) { + case ERR_TYPE_RTAS_LOG: + offset = rtas_error_log_buffer_max * + ((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK); + + /* First copy over sequence number */ + memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int)); + + /* Second copy over error log data */ + offset += sizeof(int); + memcpy(&rtas_log_buf[offset], buf, len); + + if (rtas_log_size < LOG_NUMBER) + rtas_log_size += 1; + else + rtas_log_start += 1; + + spin_unlock_irqrestore(&rtasd_log_lock, s); + wake_up_interruptible(&rtas_log_wait); + break; + case ERR_TYPE_KERNEL_PANIC: + default: + spin_unlock_irqrestore(&rtasd_log_lock, s); + return; + } + +} + + +static int rtas_log_open(struct inode * inode, struct file * file) +{ + return 0; +} + +static int rtas_log_release(struct inode * inode, struct file * file) +{ + return 0; +} + +/* This will check if all events are logged, if they are then, we + * know that we can safely clear the events in NVRAM. + * Next we'll sit and wait for something else to log. + */ +static ssize_t rtas_log_read(struct file * file, char __user * buf, + size_t count, loff_t *ppos) +{ + int error; + char *tmp; + unsigned long s; + unsigned long offset; + + if (!buf || count < rtas_error_log_buffer_max) + return -EINVAL; + + count = rtas_error_log_buffer_max; + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + tmp = kmalloc(count, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + + spin_lock_irqsave(&rtasd_log_lock, s); + /* if it's 0, then we know we got the last one (the one in NVRAM) */ + if (rtas_log_size == 0 && !no_logging) + nvram_clear_error_log(); + spin_unlock_irqrestore(&rtasd_log_lock, s); + + + error = wait_event_interruptible(rtas_log_wait, rtas_log_size); + if (error) + goto out; + + spin_lock_irqsave(&rtasd_log_lock, s); + offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK); + memcpy(tmp, &rtas_log_buf[offset], count); + + rtas_log_start += 1; + rtas_log_size -= 1; + spin_unlock_irqrestore(&rtasd_log_lock, s); + + error = copy_to_user(buf, tmp, count) ? 
-EFAULT : count; +out: + kfree(tmp); + return error; +} + +static unsigned int rtas_log_poll(struct file *file, poll_table * wait) +{ + poll_wait(file, &rtas_log_wait, wait); + if (rtas_log_size) + return POLLIN | POLLRDNORM; + return 0; +} + +struct file_operations proc_rtas_log_operations = { + .read = rtas_log_read, + .poll = rtas_log_poll, + .open = rtas_log_open, + .release = rtas_log_release, +}; + +static int enable_surveillance(int timeout) +{ + int error; + + error = rtas_set_indicator(SURVEILLANCE_TOKEN, 0, timeout); + + if (error == 0) + return 0; + + if (error == -EINVAL) { + printk(KERN_INFO "rtasd: surveillance not supported\n"); + return 0; + } + + printk(KERN_ERR "rtasd: could not update surveillance\n"); + return -1; +} + +static int get_eventscan_parms(void) +{ + struct device_node *node; + int *ip; + + node = of_find_node_by_path("/rtas"); + + ip = (int *)get_property(node, "rtas-event-scan-rate", NULL); + if (ip == NULL) { + printk(KERN_ERR "rtasd: no rtas-event-scan-rate\n"); + of_node_put(node); + return -1; + } + rtas_event_scan_rate = *ip; + DEBUG("rtas-event-scan-rate %d\n", rtas_event_scan_rate); + + /* Make room for the sequence number */ + rtas_error_log_max = rtas_get_error_log_max(); + rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); + + of_node_put(node); + + return 0; +} + +static void do_event_scan(int event_scan) +{ + int error; + do { + memset(logdata, 0, rtas_error_log_max); + error = rtas_call(event_scan, 4, 1, NULL, + RTAS_EVENT_SCAN_ALL_EVENTS, 0, + __pa(logdata), rtas_error_log_max); + if (error == -1) { + printk(KERN_ERR "event-scan failed\n"); + break; + } + + if (error == 0) + pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0); + + } while(error == 0); +} + +static void do_event_scan_all_cpus(long delay) +{ + int cpu; + + lock_cpu_hotplug(); + cpu = first_cpu(cpu_online_map); + for (;;) { + set_cpus_allowed(current, cpumask_of_cpu(cpu)); + do_event_scan(rtas_token("event-scan")); + set_cpus_allowed(current, CPU_MASK_ALL); + + /* Drop hotplug lock, and sleep for the specified delay */ + unlock_cpu_hotplug(); + msleep_interruptible(delay); + lock_cpu_hotplug(); + + cpu = next_cpu(cpu, cpu_online_map); + if (cpu == NR_CPUS) + break; + } + unlock_cpu_hotplug(); +} + +static int rtasd(void *unused) +{ + unsigned int err_type; + int event_scan = rtas_token("event-scan"); + int rc; + + daemonize("rtasd"); + + if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1) + goto error; + + rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER); + if (!rtas_log_buf) { + printk(KERN_ERR "rtasd: no memory\n"); + goto error; + } + + printk(KERN_INFO "RTAS daemon started\n"); + + DEBUG("will sleep for %d milliseconds\n", (30000/rtas_event_scan_rate)); + + /* See if we have any error stored in NVRAM */ + memset(logdata, 0, rtas_error_log_max); + + rc = nvram_read_error_log(logdata, rtas_error_log_max, &err_type); + + /* We can use rtas_log_buf now */ + no_logging = 0; + + if (!rc) { + if (err_type != ERR_FLAG_ALREADY_LOGGED) { + pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0); + } + } + + /* First pass. */ + do_event_scan_all_cpus(1000); + + if (surveillance_timeout != -1) { + DEBUG("enabling surveillance\n"); + enable_surveillance(surveillance_timeout); + DEBUG("surveillance enabled\n"); + } + + /* Delay should be at least one second since some + * machines have problems if we call event-scan too + * quickly. 
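The delay fed to the daemon loop below is 30000/rtas_event_scan_rate milliseconds. If, as the property name suggests, rtas-event-scan-rate is the number of event-scan calls the platform requires per minute, that interval polls at twice the required minimum; the numbers are easy to tabulate (standalone, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int rates[] = { 1, 10, 30, 120 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("rtas-event-scan-rate %3u/min -> sleep %5u ms between scans\n",
		       rates[i], 30000 / rates[i]);
	return 0;
}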
*/ + for (;;) + do_event_scan_all_cpus(30000/rtas_event_scan_rate); + +error: + /* Should delete proc entries */ + return -EINVAL; +} + +static int __init rtas_init(void) +{ + struct proc_dir_entry *entry; + + /* No RTAS, only warn if we are on a pSeries box */ + if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) { + if (systemcfg->platform & PLATFORM_PSERIES) + printk(KERN_INFO "rtasd: no event-scan on system\n"); + return 1; + } + + entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL); + if (entry) + entry->proc_fops = &proc_rtas_log_operations; + else + printk(KERN_ERR "Failed to create error_log proc entry\n"); + + if (kernel_thread(rtasd, NULL, CLONE_FS) < 0) + printk(KERN_ERR "Failed to start RTAS daemon\n"); + + return 0; +} + +static int __init surveillance_setup(char *str) +{ + int i; + + if (get_option(&str,&i)) { + if (i >= 0 && i <= 255) + surveillance_timeout = i; + } + + return 1; +} + +static int __init rtasmsgs_setup(char *str) +{ + if (strcmp(str, "on") == 0) + full_rtas_msgs = 1; + else if (strcmp(str, "off") == 0) + full_rtas_msgs = 0; + + return 1; +} +__initcall(rtas_init); +__setup("surveillance=", surveillance_setup); +__setup("rtasmsgs=", rtasmsgs_setup); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 10cb0f2d9b5..65bee939eec 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -58,7 +58,6 @@ #include <asm/irq.h> #include <asm/time.h> #include <asm/nvram.h> -#include <asm/plpar_wrappers.h> #include "xics.h" #include <asm/firmware.h> #include <asm/pmc.h> @@ -67,7 +66,7 @@ #include <asm/i8259.h> #include <asm/udbg.h> -#include "rtas-fw.h" +#include "plpar_wrappers.h" #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -352,6 +351,16 @@ static void pSeries_mach_cpu_die(void) for(;;); } +static int pseries_set_dabr(unsigned long dabr) +{ + if (firmware_has_feature(FW_FEATURE_XDABR)) { + /* We want to catch accesses from kernel and userspace */ + return plpar_set_xdabr(dabr, H_DABRX_KERNEL | H_DABRX_USER); + } + + return plpar_set_dabr(dabr); +} + /* * Early initialization. Relocation is on but do not reference unbolted pages @@ -387,6 +396,8 @@ static void __init pSeries_init_early(void) DBG("Hello World !\n"); } + if (firmware_has_feature(FW_FEATURE_XDABR | FW_FEATURE_DABR)) + ppc_md.set_dabr = pseries_set_dabr; iommu_init_early_pSeries(); @@ -591,9 +602,9 @@ struct machdep_calls __initdata pSeries_md = { .pcibios_fixup = pSeries_final_fixup, .pci_probe_mode = pSeries_pci_probe_mode, .irq_bus_setup = pSeries_irq_bus_setup, - .restart = rtas_fw_restart, - .power_off = rtas_fw_power_off, - .halt = rtas_fw_halt, + .restart = rtas_restart, + .power_off = rtas_power_off, + .halt = rtas_halt, .panic = rtas_os_term, .cpu_die = pSeries_mach_cpu_die, .get_boot_time = rtas_get_boot_time, diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 9c9458ddfc2..7a243e8ccd7 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -44,10 +44,11 @@ #include <asm/firmware.h> #include <asm/system.h> #include <asm/rtas.h> -#include <asm/plpar_wrappers.h> #include <asm/pSeries_reconfig.h> #include <asm/mpic.h> +#include "plpar_wrappers.h" + #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else |
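With the two __setup() hooks above, a kernel command line such as "surveillance=60 rtasmsgs=on" sets surveillance_timeout and full_rtas_msgs before rtasd starts; the timeout value is handed straight to rtas_set_indicator(SURVEILLANCE_TOKEN, ...), so its unit is whatever the platform defines for that indicator, and 60 is only an arbitrary example. A standalone mimic of the parsing (this is not the kernel's get_option()/__setup machinery):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int surveillance_timeout = -1;
static int full_rtas_msgs;

static void parse_surveillance(const char *str)
{
	int i = atoi(str);

	if (i >= 0 && i <= 255)			/* same range check as above */
		surveillance_timeout = i;
}

static void parse_rtasmsgs(const char *str)
{
	if (strcmp(str, "on") == 0)
		full_rtas_msgs = 1;
	else if (strcmp(str, "off") == 0)
		full_rtas_msgs = 0;
}

int main(void)
{
	parse_surveillance("60");
	parse_rtasmsgs("on");
	printf("surveillance_timeout=%d full_rtas_msgs=%d\n",
	       surveillance_timeout, full_rtas_msgs);
	return 0;
}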