author     Mark Brown <broonie@opensource.wolfsonmicro.com>  2009-10-06 16:01:27 +0100
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>  2009-10-06 16:01:27 +0100
commit     907bc6c7fc7071b00083fc11e510e47dd93df45d (patch)
tree       0697a608561522c00da9e1814974a2eb051bb96d /arch/ia64
parent     d2b247a8be57647d1745535acd58169fbcbe431a (diff)
parent     2a0f5cb32772e9a9560209e241a80bfbbc31dbc3 (diff)
Merge branch 'for-2.6.32' into for-2.6.33
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig | 15
-rw-r--r--  arch/ia64/Makefile | 5
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 7
-rw-r--r--  arch/ia64/hp/sim/simeth.c | 2
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 4
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 2
-rw-r--r--  arch/ia64/include/asm/acpi.h | 2
-rw-r--r--  arch/ia64/include/asm/agp.h | 4
-rw-r--r--  arch/ia64/include/asm/bitops.h | 2
-rw-r--r--  arch/ia64/include/asm/cputime.h | 1
-rw-r--r--  arch/ia64/include/asm/device.h | 3
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 19
-rw-r--r--  arch/ia64/include/asm/fpu.h | 2
-rw-r--r--  arch/ia64/include/asm/kvm_host.h | 4
-rw-r--r--  arch/ia64/include/asm/kvm_para.h | 4
-rw-r--r--  arch/ia64/include/asm/mca.h | 2
-rw-r--r--  arch/ia64/include/asm/mman.h | 14
-rw-r--r--  arch/ia64/include/asm/pci.h | 14
-rw-r--r--  arch/ia64/include/asm/pgalloc.h | 6
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 1
-rw-r--r--  arch/ia64/include/asm/smp.h | 1
-rw-r--r--  arch/ia64/include/asm/socket.h | 3
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 175
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 2
-rw-r--r--  arch/ia64/include/asm/tlb.h | 12
-rw-r--r--  arch/ia64/include/asm/topology.h | 20
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h | 1
-rw-r--r--  arch/ia64/install.sh | 4
-rw-r--r--  arch/ia64/kernel/Makefile.gate | 2
-rw-r--r--  arch/ia64/kernel/crash.c | 83
-rw-r--r--  arch/ia64/kernel/dma-mapping.c | 10
-rw-r--r--  arch/ia64/kernel/esi.c | 2
-rw-r--r--  arch/ia64/kernel/head.S | 95
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 24
-rw-r--r--  arch/ia64/kernel/init_task.c | 3
-rw-r--r--  arch/ia64/kernel/iosapic.c | 4
-rw-r--r--  arch/ia64/kernel/machine_kexec.c | 15
-rw-r--r--  arch/ia64/kernel/mca.c | 15
-rw-r--r--  arch/ia64/kernel/mca_asm.S | 47
-rw-r--r--  arch/ia64/kernel/pci-dma.c | 5
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/ia64/kernel/process.c | 9
-rw-r--r--  arch/ia64/kernel/ptrace.c | 1
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S | 2
-rw-r--r--  arch/ia64/kernel/salinfo.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 6
-rw-r--r--  arch/ia64/kernel/smp.c | 5
-rw-r--r--  arch/ia64/kernel/topology.c | 6
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 129
-rw-r--r--  arch/ia64/kvm/Kconfig | 11
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 85
-rw-r--r--  arch/ia64/kvm/kvm_lib.c | 6
-rw-r--r--  arch/ia64/kvm/mmio.c | 6
-rw-r--r--  arch/ia64/kvm/process.c | 6
-rw-r--r--  arch/ia64/kvm/vcpu.c | 12
-rw-r--r--  arch/ia64/kvm/vcpu.h | 13
-rw-r--r--  arch/ia64/kvm/vtlb.c | 4
-rw-r--r--  arch/ia64/lib/ip_fast_csum.S | 8
-rw-r--r--  arch/ia64/mm/init.c | 7
-rw-r--r--  arch/ia64/oprofile/backtrace.c | 20
-rw-r--r--  arch/ia64/pci/pci.c | 2
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 3
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c | 2
-rw-r--r--  arch/ia64/xen/time.c | 3
67 files changed, 458 insertions, 534 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 170042b420d..1ee596cd942 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -60,9 +60,7 @@ config IOMMU_HELPER
bool
config GENERIC_LOCKBREAK
- bool
- default y
- depends on SMP && PREEMPT
+ def_bool n
config RWSEM_XCHGADD_ALGORITHM
bool
@@ -89,6 +87,9 @@ config GENERIC_TIME_VSYSCALL
bool
default y
+config HAVE_LEGACY_PER_CPU_AREA
+ def_bool y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool y
@@ -112,6 +113,10 @@ config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR
+config ARCH_USES_PG_UNCACHED
+ def_bool y
+ depends on IA64_UNCACHED_ALLOCATOR
+
config AUDIT_ARCH
bool
default y
@@ -493,6 +498,10 @@ config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
+config ARCH_PROC_KCORE_TEXT
+ def_bool y
+ depends on PROC_KCORE
+
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 58a7e46affd..e7cbaa02cd0 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -41,11 +41,6 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
-ifeq ($(call cc-version),0304)
- cflags-$(CONFIG_ITANIUM) += -mtune=merced
- cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
-endif
-
KBUILD_CFLAGS += $(cflags-y)
head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 8cfb001092a..674a8374c6d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2026,24 +2026,21 @@ acpi_sba_ioc_add(struct acpi_device *device)
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
- struct acpi_buffer buffer;
struct acpi_device_info *dev_info;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_get_object_info(device->handle, &buffer);
+ status = acpi_get_object_info(device->handle, &dev_info);
if (ACPI_FAILURE(status))
return 1;
- dev_info = buffer.pointer;
/*
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
*/
- if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
+ if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
if (!iovp_shift)
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index e4d8fde6810..7e81966ce48 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -412,7 +412,7 @@ simeth_tx(struct sk_buff *skb, struct net_device *dev)
*/
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static inline struct sk_buff *
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index f92bdaac897..c69552bf893 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -69,11 +69,11 @@ ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
}
-static struct vm_operations_struct ia32_shared_page_vm_ops = {
+static const struct vm_operations_struct ia32_shared_page_vm_ops = {
.fault = ia32_install_shared_page
};
-static struct vm_operations_struct ia32_gate_page_vm_ops = {
+static const struct vm_operations_struct ia32_gate_page_vm_ops = {
.fault = ia32_install_gate_page
};
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 16ef61a91d9..625ed8f76fc 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1270,7 +1270,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
case PT_CS:
if (value != __USER_CS)
printk(KERN_ERR
- "ia32.putreg: attempt to to set invalid segment register %d = %x\n",
+ "ia32.putreg: attempt to set invalid segment register %d = %x\n",
regno, value);
break;
default:
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 0f82cc2934e..91df9686a0d 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -89,10 +89,12 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
+#ifdef CONFIG_ACPI
#define acpi_disabled 0 /* ACPI always enabled on IA64 */
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
index c11fdd8ab4d..01d09c401c5 100644
--- a/arch/ia64/include/asm/agp.h
+++ b/arch/ia64/include/asm/agp.h
@@ -17,10 +17,6 @@
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index e2ca8003733..57a2787bc9f 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -286,7 +286,7 @@ __test_and_clear_bit(int nr, volatile void * addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
- int oldbitset = *p & m;
+ int oldbitset = (*p & m) != 0;
*p &= ~m;
return oldbitset;
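
/*
 * Editor's sketch, not kernel code: why normalizing the return value to
 * `(*p & m) != 0` matters. For nr & 31 == 31 the raw masked word is
 * 0x80000000, which converted to int is implementation-defined (typically
 * negative), so callers comparing the result against 1 misbehave.
 */
#include <stdio.h>

int main(void)
{
	unsigned int word = 1u << 31;	/* bit 31 set */
	unsigned int m    = 1u << 31;

	int raw  = word & m;		/* typically INT_MIN, never 1 */
	int norm = (word & m) != 0;	/* always exactly 0 or 1 */

	printf("raw=%d norm=%d\n", raw, norm);
	printf("raw==1? %d  norm==1? %d\n", raw == 1, norm == 1);
	return 0;
}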
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index d20b998cb91..7fa8a859466 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -30,6 +30,7 @@ typedef u64 cputime_t;
typedef u64 cputime64_t;
#define cputime_zero ((cputime_t)0)
+#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_max ((~((cputime_t)0) >> 1) - 1)
#define cputime_add(__a, __b) ((__a) + (__b))
#define cputime_sub(__a, __b) ((__a) - (__b))
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 41ab85d66f3..d66d446b127 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -15,4 +15,7 @@ struct dev_archdata {
#endif
};
+struct pdev_archdata {
+};
+
#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18..8d3c79cd81e 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define get_dma_ops(dev) platform_dma_get_ops(dev)
-#define flush_write_buffers()
#include <asm-generic/dma-mapping-common.h>
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
return 0;
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
extern int dma_get_cache_alignment(void);
static inline void
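
/*
 * Editor's sketch of the dma_capable() check added above, outside the
 * kernel: a buffer is usable for DMA only if it fits under the device's
 * DMA mask. On ia64 phys_to_dma()/dma_to_phys() are the identity, so the
 * mask comparison is the whole test. Names here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool dma_capable(const uint64_t *dma_mask, uint64_t addr, uint64_t size)
{
	if (!dma_mask)		/* no mask set: device cannot do DMA */
		return false;
	return addr + size <= *dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* 32-bit-capable device */

	printf("%d\n", dma_capable(&mask32, 0x00001000ULL, 0x1000)); /* 1 */
	printf("%d\n", dma_capable(&mask32, 0xffffffffULL, 2));      /* 0: crosses 4 GiB */
	return 0;
}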
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
index 0c26157cffa..b6395ad1500 100644
--- a/arch/ia64/include/asm/fpu.h
+++ b/arch/ia64/include/asm/fpu.h
@@ -6,6 +6,8 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/types.h>
+
/* floating point status register: */
#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 5f43697aed3..d9b6325a932 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -235,7 +235,8 @@ struct kvm_vm_data {
#define KVM_REQ_PTC_G 32
#define KVM_REQ_RESUME 33
-#define KVM_PAGES_PER_HPAGE 1
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) 1
struct kvm;
struct kvm_vcpu;
@@ -465,7 +466,6 @@ struct kvm_arch {
unsigned long metaphysical_rr4;
unsigned long vmm_init_rr;
- int online_vcpus;
int is_sn2;
struct kvm_ioapic *vioapic;
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 0d6d8ca07b8..1588aee781a 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -19,9 +19,13 @@
*
*/
+#ifdef __KERNEL__
+
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
#endif
+
+#endif
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 44a0b53df90..c171cdf0a78 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -145,12 +145,14 @@ extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
extern void ia64_init_handler(struct pt_regs *,
struct switch_stack *,
struct ia64_sal_os_state *);
+extern void ia64_os_init_on_kdump(void);
extern void ia64_monarch_init_handler(void);
extern void ia64_slave_init_handler(void);
extern void ia64_mca_cmc_vector_setup(void);
extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
extern void ia64_unreg_MCA_extension(void);
extern unsigned long ia64_get_rnat(unsigned long *);
+extern void ia64_set_psr_mc(void);
extern void ia64_mca_printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2)));
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
index 48cf8b98a0b..4459028e5aa 100644
--- a/arch/ia64/include/asm/mman.h
+++ b/arch/ia64/include/asm/mman.h
@@ -8,19 +8,9 @@
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
*/
-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman.h>
-#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
-#define MAP_GROWSUP 0x00200 /* register stack-like segment */
-#define MAP_DENYWRITE 0x00800 /* ETXTBSY */
-#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */
-#define MAP_LOCKED 0x02000 /* pages are locked */
-#define MAP_NORESERVE 0x04000 /* don't check for reservations */
-#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-
-#define MCL_CURRENT 1 /* lock all current mappings */
-#define MCL_FUTURE 2 /* lock all future mappings */
+#define MAP_GROWSUP 0x0200 /* register stack-like segment */
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index fcfca56bb85..55281aabe5f 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -17,7 +17,6 @@
* loader.
*/
#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -135,7 +134,18 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res, struct pci_bus_region *region);
-#define pcibios_scan_all_fns(a, b) 0
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index b9ac1a6fc21..96a8d927db2 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
quicklist_free(0, NULL, pud);
}
-#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */
static inline void
@@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
quicklist_free(0, NULL, pmd);
}
-#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
quicklist_trim(0, NULL, 25, 16);
}
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 0a9cc73d35c..8840a690d1e 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -155,7 +155,6 @@
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
-#include <asm/processor.h>
/*
* Next come the mappings that determine how mmap() protection bits
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e05..0b3b3997dec 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else /* CONFIG_SMP */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 745421225ec..0b0d5ff062e 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -66,4 +66,7 @@
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
#endif /* _ASM_IA64_SOCKET_H */
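
/*
 * SO_PROTOCOL and SO_DOMAIN are new read-only socket options wired up
 * for 2.6.32: they report the protocol and address family a socket was
 * created with. A quick user-space check (assumes a libc that already
 * defines both constants):
 */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int domain = -1, protocol = -1;
	socklen_t len;

	if (fd < 0)
		return 1;
	len = sizeof(domain);
	getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len);
	len = sizeof(protocol);
	getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len);
	/* expect domain == AF_INET (2), protocol == IPPROTO_TCP (6) */
	printf("domain=%d protocol=%d\n", domain, protocol);
	return 0;
}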
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 13ab71576bc..30bb930e111 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -19,103 +19,106 @@
#define __raw_spin_lock_init(x) ((x)->lock = 0)
-#ifdef ASM_SUPPORTED
/*
- * Try to get the lock. If we fail to get the lock, make a non-standard call to
- * ia64_spinlock_contention(). We do not use a normal call because that would force all
- * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourself to the queue and noting our position), then waiting until the head
+ * becomes equal to the initial value of the tail.
+ *
+ * 63 32 31 0
+ * +----------------------------------------------------+
+ * | next_ticket_number | now_serving |
+ * +----------------------------------------------------+
*/
-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+#define TICKET_SHIFT 32
-static inline void
-__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
- register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-# ifdef CONFIG_ITANIUM
- /* don't use brl on Itanium... */
- asm volatile ("{\n\t"
- " mov ar.ccv = r0\n\t"
- " mov r28 = ip\n\t"
- " mov r30 = 1;;\n\t"
- "}\n\t"
- "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
- "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "mov b6 = r29;;\n\t"
- "mov r27=%2\n\t"
- "(p14) br.cond.spnt.many b6"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
- asm volatile ("{\n\t"
- " mov ar.ccv = r0\n\t"
- " mov r28 = ip\n\t"
- " mov r30 = 1;;\n\t"
- "}\n\t"
- "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "mov r27=%2\n\t"
- "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#else
-# ifdef CONFIG_ITANIUM
- /* don't use brl on Itanium... */
- /* mis-declare, so we get the entry-point, not it's function descriptor: */
- asm volatile ("mov r30 = 1\n\t"
- "mov r27=%2\n\t"
- "mov ar.ccv = r0;;\n\t"
- "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
- "movl r29 = ia64_spinlock_contention;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "mov b6 = r29;;\n\t"
- "(p14) br.call.spnt.many b6 = b6"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
- asm volatile ("mov r30 = 1\n\t"
- "mov r27=%2\n\t"
- "mov ar.ccv = r0;;\n\t"
- "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
- "cmp4.ne p14, p0 = r30, r0\n\t"
- "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
- : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#endif
+ int *p = (int *)&lock->lock, turn, now_serving;
+
+ now_serving = *p;
+ turn = ia64_fetchadd(1, p+1, acq);
+
+ if (turn == now_serving)
+ return;
+
+ do {
+ cpu_relax();
+ } while (ACCESS_ONCE(*p) != turn);
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+{
+ long tmp = ACCESS_ONCE(lock->lock), try;
-/* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void __raw_spin_unlock(raw_spinlock_t *x) {
- barrier();
- asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+ if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
+ try = tmp + (1L << TICKET_SHIFT);
+
+ return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
+ }
+ return 0;
}
-#else /* !ASM_SUPPORTED */
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-# define __raw_spin_lock(x) \
-do { \
- __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
- __u64 ia64_spinlock_val; \
- ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
- if (unlikely(ia64_spinlock_val)) { \
- do { \
- while (*ia64_spinlock_ptr) \
- ia64_barrier(); \
- ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
- } while (ia64_spinlock_val); \
- } \
-} while (0)
-#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
-#endif /* !ASM_SUPPORTED */
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+{
+ int *p = (int *)&lock->lock;
+
+ (void)ia64_fetchadd(1, p, rel);
+}
+
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+ long tmp = ACCESS_ONCE(lock->lock);
+
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+ long tmp = ACCESS_ONCE(lock->lock);
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+ return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+}
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+ return __ticket_spin_is_locked(lock);
+}
+
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+{
+ return __ticket_spin_is_contended(lock);
+}
+#define __raw_spin_is_contended __raw_spin_is_contended
+
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __ticket_spin_lock(lock);
+}
+
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ __ticket_spin_unlock(lock);
+}
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+ unsigned long flags)
+{
+ __raw_spin_lock(lock);
+}
+
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+ while (__raw_spin_is_locked(lock))
+ cpu_relax();
+}
#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
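
/*
 * The ticket lock above replaces the old contention trampoline: the lock
 * word holds now_serving in its low half and next_ticket_number in its
 * high half; lockers take a ticket with a fetch-and-add and spin until
 * served. Below is an editor's user-space sketch of the same scheme with
 * C11 atomics -- ia64_fetchadd, ACCESS_ONCE and cpu_relax() are replaced
 * by portable stand-ins, and a 64-bit unsigned long is assumed. This is
 * an illustration, not the kernel's implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

#define TICKET_SHIFT 32

typedef struct {
	_Atomic unsigned long lock;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* take a ticket: the old high half is our turn number */
	unsigned long old = atomic_fetch_add_explicit(&l->lock,
			1UL << TICKET_SHIFT, memory_order_acquire);
	unsigned long turn = old >> TICKET_SHIFT;

	/* spin until now_serving (low half) reaches our turn */
	while ((atomic_load_explicit(&l->lock, memory_order_acquire)
			& ((1UL << TICKET_SHIFT) - 1)) != turn)
		;	/* cpu_relax() in the kernel */
}

static void ticket_unlock(ticket_lock_t *l)
{
	/* advance now_serving; hands the lock to the next ticket holder */
	atomic_fetch_add_explicit(&l->lock, 1UL, memory_order_release);
}

int main(void)
{
	ticket_lock_t l = { 0 };

	ticket_lock(&l);
	ticket_unlock(&l);
	/* prints 0x100000001: one ticket issued, one served */
	printf("lock word = %#lx\n", (unsigned long)atomic_load(&l.lock));
	return 0;
}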
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4..b61d136d9bc 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
#endif
typedef struct {
- volatile unsigned int lock;
+ volatile unsigned long lock;
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ae6922626bf..8ce2e388e37 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,7 +48,7 @@ struct thread_info {
.flags = 0, \
.cpu = 0, \
.addr_limit = KERNEL_DS, \
- .preempt_count = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 20d8a39680c..85d965cb19a 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -236,22 +236,22 @@ do { \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
-#define pte_free_tlb(tlb, ptep) \
+#define pte_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
- __pte_free_tlb(tlb, ptep); \
+ __pte_free_tlb(tlb, ptep, address); \
} while (0)
-#define pmd_free_tlb(tlb, ptep) \
+#define pmd_free_tlb(tlb, ptep, address) \
do { \
tlb->need_flush = 1; \
- __pmd_free_tlb(tlb, ptep); \
+ __pmd_free_tlb(tlb, ptep, address); \
} while (0)
-#define pud_free_tlb(tlb, pudp) \
+#define pud_free_tlb(tlb, pudp, address) \
do { \
tlb->need_flush = 1; \
- __pud_free_tlb(tlb, pudp); \
+ __pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 7b4c8c70b2d..3ddb4e709db 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
#define cpumask_of_node(node) (&node_to_cpu_mask[node])
/*
@@ -61,12 +60,13 @@ void build_cpu_to_node_map(void);
.cache_nice_tries = 2, \
.busy_idx = 2, \
.idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
+ | SD_BALANCE_FORK \
| SD_WAKE_AFFINE, \
.last_balance = jiffies, \
.balance_interval = 1, \
@@ -85,14 +85,14 @@ void build_cpu_to_node_map(void);
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
.flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
- | SD_SERIALIZE \
- | SD_WAKE_BALANCE, \
+ | SD_SERIALIZE, \
.last_balance = jiffies, \
.balance_interval = 64, \
.nr_balance_failed = 0, \
@@ -103,8 +103,6 @@ void build_cpu_to_node_map(void);
#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
#define smt_capable() (smp_num_siblings > 1)
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index e425227a418..88afb54501e 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -33,6 +33,7 @@
#ifndef _ASM_IA64_XEN_HYPERVISOR_H
#define _ASM_IA64_XEN_HYPERVISOR_H
+#include <linux/err.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to compile xen-netfront.c */
diff --git a/arch/ia64/install.sh b/arch/ia64/install.sh
index 929e780026d..0e932f5dcd1 100644
--- a/arch/ia64/install.sh
+++ b/arch/ia64/install.sh
@@ -21,8 +21,8 @@
# User may have a custom install script
-if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
-if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
# Default install - same as make zlilo
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index 1d87f84069b..ab9b03a9adc 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -10,7 +10,7 @@ quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f065093f8e9..6631a9dfafd 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -23,6 +23,7 @@
int kdump_status[NR_CPUS];
static atomic_t kdump_cpu_frozen;
atomic_t kdump_in_progress;
+static int kdump_freeze_monarch;
static int kdump_on_init = 1;
static int kdump_on_fatal_mca = 1;
@@ -108,10 +109,38 @@ machine_crash_shutdown(struct pt_regs *pt)
*/
kexec_disable_iosapic();
#ifdef CONFIG_SMP
+ /*
+ * If kdump_on_init is set and an INIT is asserted here, kdump will
+ * be started again via INIT monarch.
+ */
+ local_irq_disable();
+ ia64_set_psr_mc(); /* mask MCA/INIT */
+ if (atomic_inc_return(&kdump_in_progress) != 1)
+ unw_init_running(kdump_cpu_freeze, NULL);
+
+ /*
+ * Now this cpu is ready for kdump.
+ * Stop all others by IPI or INIT. They could receive INIT from
+ * outside and might be INIT monarch, but the only thing they have
+ * to do is fall into kdump_cpu_freeze().
+ *
+ * If an INIT is asserted here:
+ * - All receivers might be slaves, since some of cpus could already
+ * be frozen and INIT might be masked on monarch. In this case,
+ * all slaves will be frozen soon since kdump_in_progress will let
+ * them into DIE_INIT_SLAVE_LEAVE.
+ * - One might be a monarch, but INIT rendezvous will fail since
+ * at least this cpu already has INIT masked so it never joins
+ * the rendezvous. In this case, all slaves and the monarch will
+ * be frozen soon with no wait since the INIT rendezvous is skipped
+ * by kdump_in_progress.
+ */
kdump_smp_send_stop();
/* not all cpu response to IPI, send INIT to freeze them */
- if (kdump_wait_cpu_freeze() && kdump_on_init) {
+ if (kdump_wait_cpu_freeze()) {
kdump_smp_send_init();
+ /* wait again; hold off going ahead until CPUs are frozen */
+ kdump_wait_cpu_freeze();
}
#endif
}
@@ -129,17 +158,17 @@ void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
int cpuid;
+
local_irq_disable();
cpuid = smp_processor_id();
crash_save_this_cpu();
current->thread.ksp = (__u64)info->sw - 16;
+
+ ia64_set_psr_mc(); /* mask MCA/INIT and stop reentrance */
+
atomic_inc(&kdump_cpu_frozen);
kdump_status[cpuid] = 1;
mb();
-#ifdef CONFIG_HOTPLUG_CPU
- if (cpuid != 0)
- ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
-#endif
for (;;)
cpu_relax();
}
@@ -150,6 +179,20 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
struct ia64_mca_notify_die *nd;
struct die_args *args = data;
+ if (atomic_read(&kdump_in_progress)) {
+ switch (val) {
+ case DIE_INIT_MONARCH_LEAVE:
+ if (!kdump_freeze_monarch)
+ break;
+ /* fall through */
+ case DIE_INIT_SLAVE_LEAVE:
+ case DIE_INIT_MONARCH_ENTER:
+ case DIE_MCA_RENDZVOUS_LEAVE:
+ unw_init_running(kdump_cpu_freeze, NULL);
+ break;
+ }
+ }
+
if (!kdump_on_init && !kdump_on_fatal_mca)
return NOTIFY_DONE;
@@ -162,43 +205,31 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
}
if (val != DIE_INIT_MONARCH_LEAVE &&
- val != DIE_INIT_SLAVE_LEAVE &&
val != DIE_INIT_MONARCH_PROCESS &&
- val != DIE_MCA_RENDZVOUS_LEAVE &&
val != DIE_MCA_MONARCH_LEAVE)
return NOTIFY_DONE;
nd = (struct ia64_mca_notify_die *)args->err;
- /* Reason code 1 means machine check rendezvous*/
- if ((val == DIE_INIT_MONARCH_LEAVE || val == DIE_INIT_SLAVE_LEAVE
- || val == DIE_INIT_MONARCH_PROCESS) && nd->sos->rv_rc == 1)
- return NOTIFY_DONE;
switch (val) {
case DIE_INIT_MONARCH_PROCESS:
- if (kdump_on_init) {
- atomic_set(&kdump_in_progress, 1);
- *(nd->monarch_cpu) = -1;
+ /* Reason code 1 means machine check rendezvous */
+ if (kdump_on_init && (nd->sos->rv_rc != 1)) {
+ if (atomic_inc_return(&kdump_in_progress) != 1)
+ kdump_freeze_monarch = 1;
}
break;
case DIE_INIT_MONARCH_LEAVE:
- if (kdump_on_init)
+ /* Reason code 1 means machine check rendezvous */
+ if (kdump_on_init && (nd->sos->rv_rc != 1))
machine_kdump_on_init();
break;
- case DIE_INIT_SLAVE_LEAVE:
- if (atomic_read(&kdump_in_progress))
- unw_init_running(kdump_cpu_freeze, NULL);
- break;
- case DIE_MCA_RENDZVOUS_LEAVE:
- if (atomic_read(&kdump_in_progress))
- unw_init_running(kdump_cpu_freeze, NULL);
- break;
case DIE_MCA_MONARCH_LEAVE:
/* *(nd->data) indicates if MCA is recoverable */
if (kdump_on_fatal_mca && !(*(nd->data))) {
- atomic_set(&kdump_in_progress, 1);
- *(nd->monarch_cpu) = -1;
- machine_kdump_on_init();
+ if (atomic_inc_return(&kdump_in_progress) == 1)
+ machine_kdump_on_init();
+ /* We got fatal MCA while kdump!? No way!! */
}
break;
}
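
/*
 * The crash.c rework above replaces atomic_set(&kdump_in_progress, 1)
 * with atomic_inc_return(): the counter now both flags that kdump is
 * underway and elects exactly one CPU to drive it, while every later
 * arrival freezes. The election pattern in isolation, as an editor's
 * sketch with C11 atomics (not the kernel code):
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int kdump_in_progress;

/* the first caller to bump the counter from 0 becomes the monarch */
static int kdump_enter(void)
{
	return atomic_fetch_add(&kdump_in_progress, 1) == 0;
}

int main(void)
{
	if (kdump_enter())
		puts("monarch: drive machine_kdump_on_init()");
	if (!kdump_enter())
		puts("latecomer: unw_init_running(kdump_cpu_freeze, ...)");
	return 0;
}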
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 086a2aeb040..f2c1600da09 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -6,6 +6,16 @@ int iommu_detected __read_mostly;
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init dma_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+ return 0;
+}
+fs_initcall(dma_init);
+
struct dma_map_ops *dma_get_ops(struct device *dev)
{
return dma_ops;
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index ebf4e988e78..d5764a3d74a 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -65,7 +65,7 @@ static int __init esi_init (void)
}
if (!esi)
- return -ENODEV;;
+ return -ENODEV;
systab = __va(esi);
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 23f846de62d..696eff28a0c 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -167,7 +167,7 @@ RestRR: \
mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
mov rr[_tmp1]=_tmp2
- .section __special_page_section,"ax"
+ __PAGE_ALIGNED_DATA
.global empty_zero_page
empty_zero_page:
@@ -181,7 +181,7 @@ swapper_pg_dir:
halt_msg:
stringz "Halting kernel\n"
- .section .text.head,"ax"
+ __REF
.global start_ap
@@ -1130,95 +1130,6 @@ SET_REG(b5);
#endif /* CONFIG_IA64_BRL_EMU */
#ifdef CONFIG_SMP
- /*
- * This routine handles spinlock contention. It uses a non-standard calling
- * convention to avoid converting leaf routines into interior routines. Because
- * of this special convention, there are several restrictions:
- *
- * - do not use gp relative variables, this code is called from the kernel
- * and from modules, r1 is undefined.
- * - do not use stacked registers, the caller owns them.
- * - do not use the scratch stack space, the caller owns it.
- * - do not use any registers other than the ones listed below
- *
- * Inputs:
- * ar.pfs - saved CFM of caller
- * ar.ccv - 0 (and available for use)
- * r27 - flags from spin_lock_irqsave or 0. Must be preserved.
- * r28 - available for use.
- * r29 - available for use.
- * r30 - available for use.
- * r31 - address of lock, available for use.
- * b6 - return address
- * p14 - available for use.
- * p15 - used to track flag status.
- *
- * If you patch this code to use more registers, do not forget to update
- * the clobber lists for spin_lock() in arch/ia64/include/asm/spinlock.h.
- */
-
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-
-GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
- .prologue
- .save ar.pfs, r0 // this code effectively has a zero frame size
- .save rp, r28
- .body
- nop 0
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- .restore sp // pop existing prologue after next insn
- mov b6 = r28
- .prologue
- .save ar.pfs, r0
- .altrp b6
- .body
- ;;
-(p15) ssm psr.i // reenable interrupts if they were on
- // DavidM says that srlz.d is slow and is not required in this case
-.wait:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
- ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
- nop 0
- ;;
- cmp4.ne p14,p0=r30,r0
-(p14) br.cond.sptk.few .wait
-(p15) rsm psr.i // disable interrupts if we reenabled them
- br.cond.sptk.few b6 // lock is now free, try to acquire
- .global ia64_spinlock_contention_pre3_4_end // for kernprof
-ia64_spinlock_contention_pre3_4_end:
-END(ia64_spinlock_contention_pre3_4)
-
-#else
-
-GLOBAL_ENTRY(ia64_spinlock_contention)
- .prologue
- .altrp b6
- .body
- tbit.nz p15,p0=r27,IA64_PSR_I_BIT
- ;;
-.wait:
-(p15) ssm psr.i // reenable interrupts if they were on
- // DavidM says that srlz.d is slow and is not required in this case
-.wait2:
- // exponential backoff, kdb, lockmeter etc. go in here
- hint @pause
- ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word
- ;;
- cmp4.ne p14,p0=r30,r0
- mov r30 = 1
-(p14) br.cond.sptk.few .wait2
-(p15) rsm psr.i // disable interrupts if we reenabled them
- ;;
- cmpxchg4.acq r30=[r31], r30, ar.ccv
- ;;
- cmp4.ne p14,p0=r0,r30
-(p14) br.cond.sptk.few .wait
-
- br.ret.sptk.many b6 // lock is now taken
-END(ia64_spinlock_contention)
-
-#endif
#ifdef CONFIG_HOTPLUG_CPU
GLOBAL_ENTRY(ia64_jump_to_sal)
@@ -1242,7 +1153,7 @@ GLOBAL_ENTRY(ia64_jump_to_sal)
movl r16=SAL_PSR_BITS_TO_SET;;
mov cr.ipsr=r16
mov cr.ifs=r0;;
- rfi;;
+ rfi;; // note: this unmasks MCA/INIT (psr.mc)
1:
/*
* Invalidate all TLB data/inst
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 2d311864e35..14d39e30062 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -21,6 +21,7 @@ EXPORT_SYMBOL(csum_ipv6_magic);
#include <asm/page.h>
EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/bootmem.h>
@@ -60,9 +61,6 @@ EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
-#include <asm/page.h>
-EXPORT_SYMBOL(copy_page);
-
#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
@@ -86,26 +84,6 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs);
#include <asm/unwind.h>
EXPORT_SYMBOL(unw_init_running);
-#ifdef ASM_SUPPORTED
-# ifdef CONFIG_SMP
-# if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-/*
- * This is not a normal routine and we don't want a function descriptor for it, so we use
- * a fake declaration here.
- */
-extern char ia64_spinlock_contention_pre3_4;
-EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
-# else
-/*
- * This is not a normal routine and we don't want a function descriptor for it, so we use
- * a fake declaration here.
- */
-extern char ia64_spinlock_contention;
-EXPORT_SYMBOL(ia64_spinlock_contention);
-# endif
-# endif
-#endif
-
#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
extern void esi_call_phys (void);
EXPORT_SYMBOL_GPL(esi_call_phys);
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index c475fc281be..e253ab8fcbc 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -33,7 +33,8 @@ union {
struct thread_info thread_info;
} s;
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task") __init_task_data =
+ {{
.task = INIT_TASK(init_task_mem.s.task),
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}};
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c48b03f2b61..dab4d393908 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -1072,6 +1072,10 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
}
addr = ioremap(phys_addr, 0);
+ if (addr == NULL) {
+ spin_unlock_irqrestore(&iosapic_lock, flags);
+ return -ENOMEM;
+ }
ver = iosapic_version(addr);
if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
iounmap(addr);
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 0823de1f6eb..3d3aeef4694 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -24,6 +24,8 @@
#include <asm/delay.h>
#include <asm/meminit.h>
#include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/mca.h>
typedef NORET_TYPE void (*relocate_new_kernel_t)(
unsigned long indirection_page,
@@ -85,13 +87,26 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
void *pal_addr = efi_get_pal_addr();
unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
int ii;
+ u64 fp, gp;
+ ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;
BUG_ON(!image);
if (image->type == KEXEC_TYPE_CRASH) {
crash_save_this_cpu();
current->thread.ksp = (__u64)info->sw - 16;
+
+ /* Register noop init handler */
+ fp = ia64_tpa(init_handler->fp);
+ gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
+ ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
+ } else {
+ /* Unregister init handlers of current kernel */
+ ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
}
+ /* Unregister mca handler - No more recovery on current kernel */
+ ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);
+
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7b30d21c519..d2877a7bfe2 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1682,14 +1682,25 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
if (!sos->monarch) {
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
+
+#ifdef CONFIG_KEXEC
+ while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
+ udelay(1000);
+#else
while (monarch_cpu == -1)
- cpu_relax(); /* spin until monarch enters */
+ cpu_relax(); /* spin until monarch enters */
+#endif
NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
+#ifdef CONFIG_KEXEC
+ while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
+ udelay(1000);
+#else
while (monarch_cpu != -1)
- cpu_relax(); /* spin until monarch leaves */
+ cpu_relax(); /* spin until monarch leaves */
+#endif
NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index a06d46548ff..7461d2573d4 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -40,6 +40,7 @@
.global ia64_do_tlb_purge
.global ia64_os_mca_dispatch
+ .global ia64_os_init_on_kdump
.global ia64_os_init_dispatch_monarch
.global ia64_os_init_dispatch_slave
@@ -299,6 +300,25 @@ END(ia64_os_mca_virtual_begin)
//StartMain////////////////////////////////////////////////////////////////////
//
+// NOP init handler for kdump. In a panic situation we may receive INIT
+// during the transition between kernels. Since registers are initialized
+// on leaving the current kernel, the current kernel's monarch/slave
+// handlers can no longer be called safely in virtual mode.
+// We could unregister these init handlers from SAL, but then an INIT
+// would cause a warm boot by SAL and the crash dump could not be
+// retrieved. Therefore register this NOP function with SAL, to prevent
+// entering virtual mode and the resulting warm boot.
+//
+ia64_os_init_on_kdump:
+ mov r8=r0 // IA64_INIT_RESUME
+ mov r9=r10 // SAL_GP
+ mov r22=r17 // *minstate
+ ;;
+ mov r10=r0 // return to same context
+ mov b0=r12 // SAL_CHECK return address
+ br b0
+
+//
// SAL to OS entry point for INIT on all processors. This has been defined for
// registration purposes with SAL as a part of ia64_mca_init. Monarch and
// slave INIT have identical processing, except for the value of the
@@ -1073,3 +1093,30 @@ GLOBAL_ENTRY(ia64_get_rnat)
mov ar.rsc=3
br.ret.sptk.many rp
END(ia64_get_rnat)
+
+
+// void ia64_set_psr_mc(void)
+//
+// Set psr.mc bit to mask MCA/INIT.
+GLOBAL_ENTRY(ia64_set_psr_mc)
+ rsm psr.i | psr.ic // disable interrupts
+ ;;
+ srlz.d
+ ;;
+ mov r14 = psr // get psr{36:35,31:0}
+ movl r15 = 1f
+ ;;
+ dep r14 = -1, r14, PSR_MC, 1 // set psr.mc
+ ;;
+ dep r14 = -1, r14, PSR_IC, 1 // set psr.ic
+ ;;
+ dep r14 = -1, r14, PSR_BN, 1 // keep bank1 in use
+ ;;
+ mov cr.ipsr = r14
+ mov cr.ifs = r0
+ mov cr.iip = r15
+ ;;
+ rfi
+1:
+ br.ret.sptk.many rp
+END(ia64_set_psr_mc)
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 05695962fe4..f6b1ff0aea7 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -69,11 +69,6 @@ iommu_dma_init(void)
int iommu_dma_supported(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = platform_dma_get_ops(dev);
-
- if (ops->dma_supported)
- return ops->dma_supported(dev, mask);
-
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 223abb13410..285aae8431c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
void __init pci_swiotlb_init(void)
{
- if (!iommu_detected || iommu_pass_through) {
+ if (!iommu_detected) {
#ifdef CONFIG_IA64_GENERIC
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index abce2468a40..f1782705b1f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
* /proc/perfmon interface, for debug only
*/
-#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
+#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5d7c0e5b9e7..9bcec9945c1 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -161,6 +161,13 @@ show_regs (struct pt_regs *regs)
show_stack(NULL, NULL);
}
+/* local support for deprecated console_print */
+void
+console_print(const char *s)
+{
+ printk(KERN_EMERG "%s", s);
+}
+
void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
@@ -192,6 +199,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
if (test_thread_flag(TIF_NOTIFY_RESUME)) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(&scr->pt);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
}
/* copy user rbs to kernel rbs */
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 92c9689b7d9..9daa87fdb01 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -15,7 +15,6 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
-#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 903babd22d6..32f6fc131fb 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -52,7 +52,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
srlz.i
;;
mov ar.rnat=r18
- rfi
+ rfi // note: this unmasks MCA/INIT (psr.mc)
;;
1:
//physical mode code begin
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 7053c55b764..e6676fca482 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -192,7 +192,7 @@ struct salinfo_platform_oemdata_parms {
static void
salinfo_work_to_do(struct salinfo_data *data)
{
- down_trylock(&data->mutex);
+ (void)(down_trylock(&data->mutex) ?: 0);
up(&data->mutex);
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1b23ec126b6..1de86c96801 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -855,11 +855,17 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
+/*
+ * In UP configuration, setup_per_cpu_areas() is defined in
+ * include/linux/percpu.h
+ */
+#ifdef CONFIG_SMP
void __init
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
}
+#endif
/*
* Do the following calculations:
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f0c521b0ba4..dabeefe2113 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -58,7 +58,8 @@ static struct local_tlb_flush_counts {
unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
-static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
+ shadow_flush_counts);
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
@@ -301,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
- smp_call_function_mask(mm->cpu_vm_mask,
+ smp_call_function_many(mm_cpumask(mm),
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
local_irq_disable();
local_finish_flush_tlb_mm(mm);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index bc80dff1df7..8f060352e12 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -372,6 +372,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
&cache_ktype_percpu_entry, &sys_dev->kobj,
"%s", "cache");
+ if (unlikely(retval < 0)) {
+ cpu_cache_sysfs_exit(cpu);
+ return retval;
+ }
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
this_object = LEAF_KOBJECT_PTR(cpu,i);
@@ -385,7 +389,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
kobject_put(&all_cpu_cache_info[cpu].kobj);
cpu_cache_sysfs_exit(cpu);
- break;
+ return retval;
}
kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 4a95e86b9ac..0a0c77b2c98 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -24,14 +24,14 @@ PHDRS {
}
SECTIONS
{
- /* Sections to be discarded */
+ /* unwind exit sections must be discarded before the rest of the
+ sections get included. */
/DISCARD/ : {
- EXIT_TEXT
- EXIT_DATA
- *(.exitcall.exit)
*(.IA_64.unwind.exit.text)
*(.IA_64.unwind_info.exit.text)
- }
+ *(.comment)
+ *(.note)
+ }
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
phys_start = _start - LOAD_OFFSET;
@@ -51,8 +51,6 @@ SECTIONS
KPROBES_TEXT
*(.gnu.linkonce.t*)
}
- .text.head : AT(ADDR(.text.head) - LOAD_OFFSET)
- { *(.text.head) }
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
@@ -66,14 +64,7 @@ SECTIONS
NOTES :code :note /* put .notes in text and mark in PT_NOTE */
code_continues : {} :code /* switch back to regular program... */
- /* Exception table */
- . = ALIGN(16);
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
- {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
/* MCA table */
. = ALIGN(16);
@@ -115,38 +106,9 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
- .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
- {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
- { INIT_DATA }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
- {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
-
- . = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
- {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
- {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
.data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
{
@@ -204,24 +166,13 @@ SECTIONS
}
#endif
- . = ALIGN(8);
- __con_initcall_start = .;
- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
- { *(.con_initcall.init) }
- __con_initcall_end = .;
- __security_initcall_start = .;
- .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
- { *(.security_initcall.init) }
- __security_initcall_end = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
- /* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
- { *(.data.init_task) }
-
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
- { *(__special_page_section)
+ {
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ . = ALIGN(PAGE_SIZE);
__start_gate_section = .;
*(.data.gate)
__stop_gate_section = .;
@@ -236,12 +187,6 @@ SECTIONS
* kernel data
*/
- .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
- { *(.data.read_mostly) }
-
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
- { *(.data.cacheline_aligned) }
-
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
PERCPU_VADDR(PERCPU_ADDR, :percpu)
@@ -258,6 +203,9 @@ SECTIONS
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
+ INIT_TASK_DATA(PAGE_SIZE)
+ CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
+ READ_MOSTLY_DATA(SMP_CACHE_BYTES)
DATA_DATA
*(.data1)
*(.gnu.linkonce.d*)
@@ -274,49 +222,16 @@ SECTIONS
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) *(.sdata1) *(.srdata) }
_edata = .;
- __bss_start = .;
- .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
- { *(.sbss) *(.scommon) }
- .bss : AT(ADDR(.bss) - LOAD_OFFSET)
- { *(.bss) *(COMMON) }
- __bss_stop = .;
+
+ BSS_SECTION(0, 0, 0)
_end = .;
code : { } :code
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- /* DWARF debug sections.
- Symbols in the DWARF debugging sections are relative to the beginning
- of the section so we begin them at 0. */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
- /* These must appear regardless of . */
- /DISCARD/ : { *(.comment) }
- /DISCARD/ : { *(.note) }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ /* Default discards */
+ DISCARDS
}
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 64d52093787..ef3e7be29ca 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -1,12 +1,8 @@
#
# KVM configuration
#
-config HAVE_KVM
- bool
-config HAVE_KVM_IRQCHIP
- bool
- default y
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -28,6 +24,8 @@ config KVM
depends on PCI
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_IRQCHIP
+ select KVM_APIC_ARCHITECTURE
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -49,9 +47,6 @@ config KVM_INTEL
Provides support for KVM on Itanium 2 processors equipped with the VT
extensions.
-config KVM_TRACE
- bool
-
source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 80c57b0a21c..0ad09f05efa 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -210,16 +210,6 @@ int kvm_dev_ioctl_check_extension(long ext)
}
-static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
- gpa_t addr, int len, int is_write)
-{
- struct kvm_io_device *dev;
-
- dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
-
- return dev;
-}
-
static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -231,6 +221,7 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct kvm_mmio_req *p;
struct kvm_io_device *mmio_dev;
+ int r;
p = kvm_get_vcpu_ioreq(vcpu);
@@ -247,16 +238,13 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->exit_reason = KVM_EXIT_MMIO;
return 0;
mmio:
- mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
- if (mmio_dev) {
- if (!p->dir)
- kvm_iodevice_write(mmio_dev, p->addr, p->size,
- &p->data);
- else
- kvm_iodevice_read(mmio_dev, p->addr, p->size,
- &p->data);
-
- } else
+ if (p->dir)
+ r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+ p->size, &p->data);
+ else
+ r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+ p->size, &p->data);
+ if (r)
printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
p->state = STATE_IORESP_READY;
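
The hunk above drops the open-coded device lookup in favour of the bus-level helpers, which return 0 when some device claimed the access and nonzero otherwise. A minimal user-space model of that status-returning dispatch pattern follows; every name in it is illustrative, not the kernel's:

	#include <stdio.h>

	struct io_dev {
		unsigned long base, len;
		unsigned long data;
	};

	static struct io_dev bus[] = { { 0x1000, 0x100, 0 } };

	static int bus_write(unsigned long addr, unsigned long val)
	{
		for (int i = 0; i < (int)(sizeof(bus) / sizeof(bus[0])); i++) {
			if (addr >= bus[i].base && addr - bus[i].base < bus[i].len) {
				bus[i].data = val;
				return 0;	/* a device claimed the access */
			}
		}
		return -1;			/* no device found on the bus */
	}

	int main(void)
	{
		if (bus_write(0x2000, 42))
			fprintf(stderr, "no iodevice found at 0x2000\n");
		return 0;
	}
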
@@ -337,13 +325,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
{
union ia64_lid lid;
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (kvm->vcpus[i]) {
- lid.val = VCPU_LID(kvm->vcpus[i]);
- if (lid.id == id && lid.eid == eid)
- return kvm->vcpus[i];
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ lid.val = VCPU_LID(vcpu);
+ if (lid.id == id && lid.eid == eid)
+ return vcpu;
}
return NULL;
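
This hunk and the next switch from indexing kvm->vcpus[] directly to the kvm_for_each_vcpu() iterator, which visits only instantiated vcpus and so lets callers drop their explicit NULL checks. A self-contained toy sketch of that iteration idiom (the macro below is a stand-in, not the kernel's definition):

	#include <stdio.h>

	struct vcpu { int id; };

	#define MAX_VCPUS 4
	static struct vcpu *vcpus[MAX_VCPUS];	/* sparse: slots may be NULL */

	/* visit only slots holding a live vcpu */
	#define for_each_vcpu(i, v) \
		for ((i) = 0; (i) < MAX_VCPUS; (i)++) \
			if (((v) = vcpus[(i)]) != NULL)

	int main(void)
	{
		static struct vcpu a = { 7 };
		struct vcpu *v;
		int i;

		vcpus[2] = &a;
		for_each_vcpu(i, v)
			printf("vcpu %d in slot %d\n", v->id, i);
		return 0;
	}
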
@@ -409,21 +396,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+ struct kvm_vcpu *vcpui;
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
- KVM_MP_STATE_UNINITIALIZED ||
- vcpu == kvm->vcpus[i])
+ kvm_for_each_vcpu(i, vcpui, kvm) {
+ if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == vcpui)
continue;
- if (waitqueue_active(&kvm->vcpus[i]->wq))
- wake_up_interruptible(&kvm->vcpus[i]->wq);
+ if (waitqueue_active(&vcpui->wq))
+ wake_up_interruptible(&vcpui->wq);
- if (kvm->vcpus[i]->cpu != -1) {
- call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (vcpui->cpu != -1) {
+ call_data.vcpu = vcpui;
+ smp_call_function_single(vcpui->cpu,
vcpu_global_purge, &call_data, 1);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
@@ -852,8 +839,6 @@ struct kvm *kvm_arch_create_vm(void)
kvm_init_vm(kvm);
- kvm->arch.online_vcpus = 0;
-
return kvm;
}
@@ -1000,10 +985,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
if (irqchip_in_kernel(kvm)) {
__s32 status;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->irq_lock);
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->irq_lock);
if (ioctl == KVM_IRQ_LINE_STATUS) {
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
@@ -1216,7 +1201,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
if (IS_ERR(vmm_vcpu))
return PTR_ERR(vmm_vcpu);
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
/*Set entry address for first run.*/
@@ -1224,7 +1209,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Initialize itc offset for vcpus*/
itc_offset = 0UL - kvm_get_itc(vcpu);
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset;
@@ -1356,8 +1341,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail;
}
- kvm->arch.online_vcpus++;
-
return vcpu;
fail:
return ERR_PTR(r);
@@ -1952,19 +1935,6 @@ int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
return find_highest_bits((int *)&vpd->irr[0]);
}
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
-{
- if (kvm_highest_pending_irq(vcpu) != -1)
- return 1;
- return 0;
-}
-
-int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
-{
- /* do real check here */
- return 1;
-}
-
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.timer_fired;
@@ -1977,7 +1947,8 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
+ return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
+ (kvm_highest_pending_irq(vcpu) != -1);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c
index a85cb611ecd..f1268b8e6f9 100644
--- a/arch/ia64/kvm/kvm_lib.c
+++ b/arch/ia64/kvm/kvm_lib.c
@@ -11,5 +11,11 @@
*
*/
#undef CONFIG_MODULES
+#include <linux/module.h>
+#undef CONFIG_KALLSYMS
+#undef EXPORT_SYMBOL
+#undef EXPORT_SYMBOL_GPL
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
#include "../../../lib/vsprintf.c"
#include "../../../lib/ctype.c"
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 21f63fffc37..9bf55afd08d 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -247,7 +247,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
/* Write high word. FIXME: this is a kludge! */
v.u.bits[1] &= 0x3ffff;
- mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+ mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
+ ma, IOREQ_WRITE);
data = v.u.bits[0];
size = 3;
} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
@@ -265,7 +266,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
/* Write high word.FIXME: this is a kludge! */
v.u.bits[1] &= 0x3ffff;
- mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+ mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
+ 8, ma, IOREQ_WRITE);
data = v.u.bits[0];
size = 3;
} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index a8f84da04b4..bb862fb224f 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -130,7 +130,7 @@ static void collect_interruption(struct kvm_vcpu *vcpu)
if (vdcr & IA64_DCR_PP) {
vpsr |= IA64_PSR_PP;
} else {
- vpsr &= ~IA64_PSR_PP;;
+ vpsr &= ~IA64_PSR_PP;
}
vcpu_set_psr(vcpu, vpsr);
@@ -594,11 +594,11 @@ static void set_pal_call_data(struct kvm_vcpu *vcpu)
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
break;
case PAL_BRAND_INFO:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
break;
default:
- p->u.pal_data.gr29 = gr29;;
+ p->u.pal_data.gr29 = gr29;
p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
}
p->u.pal_data.gr28 = gr28;
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a2c6c15e476..dce75b70cdd 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -406,7 +406,7 @@ void getreg(unsigned long regnum, unsigned long *val,
* Now look at registers in [0-31] range and init correct UNAT
*/
addr = (unsigned long)regs;
- unat = &regs->eml_unat;;
+ unat = &regs->eml_unat;
addr += gr_info[regnum];
@@ -461,7 +461,7 @@ void setreg(unsigned long regnum, unsigned long val,
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
struct kvm_pt_regs *regs = vcpu_regs(vcpu);
- u64 val;
+ unsigned long val;
if (!reg)
return 0;
@@ -469,7 +469,7 @@ u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
return val;
}
-void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
{
struct kvm_pt_regs *regs = vcpu_regs(vcpu);
long sof = (regs->cr_ifs) & 0x7f;
@@ -830,8 +830,8 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
kvm = (struct kvm *)KVM_VM_BASE;
- if (vcpu->vcpu_id == 0) {
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
VMX(v, itc_offset) = itc_offset;
@@ -1072,7 +1072,7 @@ void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}
-int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
{
struct thash_data *data;
union ia64_isr visr, pt_isr;
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 042af92ced8..360724d3ae6 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -686,14 +686,15 @@ static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
return highest_bits((int *)&(VMX(vcpu, insvc[0])));
}
-extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
struct ia64_fpreg *val);
-extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
struct ia64_fpreg *val);
-extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
-extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
-extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
-extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
+extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg);
+extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg,
+ u64 val, int nat);
+extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu);
+extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val);
extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 4290a429bf7..20b3852f7a6 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -135,7 +135,7 @@ struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
u64 rid;
rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;;
+ rid = rid & RR_RID_MASK;
if (type == D_TLB) {
if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
@@ -518,7 +518,7 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
struct thash_cb *hcb = &v->arch.vtlb;
- cch = __vtr_lookup(v, va, is_data);;
+ cch = __vtr_lookup(v, va, is_data);
if (cch)
return cch;
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 1f86aeb2c94..620d9dc5220 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -96,20 +96,22 @@ END(ip_fast_csum)
GLOBAL_ENTRY(csum_ipv6_magic)
ld4 r20=[in0],4
ld4 r21=[in1],4
- dep r15=in3,in2,32,16
+ zxt4 in2=in2
;;
ld4 r22=[in0],4
ld4 r23=[in1],4
- mux1 r15=r15,@rev
+ dep r15=in3,in2,32,16
;;
ld4 r24=[in0],4
ld4 r25=[in1],4
- shr.u r15=r15,16
+ mux1 r15=r15,@rev
add r16=r20,r21
add r17=r22,r23
+ zxt4 in4=in4
;;
ld4 r26=[in0],4
ld4 r27=[in1],4
+ shr.u r15=r15,16
add r18=r24,r25
add r8=r16,r17
;;
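
Besides rescheduling the bundle, the hunk inserts zxt4 to zero-extend the 32-bit arguments before they feed 64-bit dep/add operations, presumably because the upper halves of those incoming registers are not guaranteed to be clear. A runnable C analogue of that zero-extension (names illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t fold_len(uint64_t reg)
	{
		uint32_t len = (uint32_t)reg;	/* the C analogue of zxt4 */
		return (uint64_t)len << 16;	/* now safe in 64-bit arithmetic */
	}

	int main(void)
	{
		/* upper 32 bits deliberately dirty */
		printf("%llx\n", (unsigned long long)fold_len(0xdeadbeef00000028ULL));
		return 0;
	}
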
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b115b3bbf04..1857766a63c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -617,7 +617,6 @@ mem_init (void)
long reserved_pages, codesize, datasize, initsize;
pg_data_t *pgdat;
int i;
- static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
@@ -639,10 +638,6 @@ mem_init (void)
high_memory = __va(max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
- kclist_add(&kcore_kernel, _stext, _end - _stext);
-
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);
@@ -655,7 +650,7 @@ mem_init (void)
initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
- "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
+ "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
index adb01566bd5..5cdd7e4a597 100644
--- a/arch/ia64/oprofile/backtrace.c
+++ b/arch/ia64/oprofile/backtrace.c
@@ -32,24 +32,6 @@ typedef struct
u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */
} ia64_backtrace_t;
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-/*
- * Returns non-zero if the PC is in the spinlock contention out-of-line code
- * with non-standard calling sequence (on older compilers).
- */
-static __inline__ int in_old_ool_spinlock_code(unsigned long pc)
-{
- extern const char ia64_spinlock_contention_pre3_4[] __attribute__ ((weak));
- extern const char ia64_spinlock_contention_pre3_4_end[] __attribute__ ((weak));
- unsigned long sc_start = (unsigned long)ia64_spinlock_contention_pre3_4;
- unsigned long sc_end = (unsigned long)ia64_spinlock_contention_pre3_4_end;
- return (sc_start && sc_end && pc >= sc_start && pc < sc_end);
-}
-#else
-/* Newer spinlock code does a proper br.call and works fine with the unwinder */
-#define in_old_ool_spinlock_code(pc) 0
-#endif
-
/* Returns non-zero if the PC is in the Interrupt Vector Table */
static __inline__ int in_ivt_code(unsigned long pc)
{
@@ -80,7 +62,7 @@ static __inline__ int next_frame(ia64_backtrace_t *bt)
*/
if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc)
bt->frame.pfs_loc = &bt->regs->ar_pfs;
- bt->prev_pfs_loc = (in_old_ool_spinlock_code(bt->frame.ip) ? bt->frame.pfs_loc : NULL);
+ bt->prev_pfs_loc = NULL;
return unw_unwind(&bt->frame) == 0;
}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 729298f4b23..7de76dd352f 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -537,7 +537,7 @@ pcibios_align_resource (void *data, struct resource *res,
/*
* PCI BIOS setup, always defaults to SAL interface
*/
-char * __devinit
+char * __init
pcibios_setup (char *str)
{
return str;
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 76645cf6ac5..25831c47c57 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -435,7 +435,8 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
bricktype = MODULE_GET_BTYPE(moduleid);
if ((bricktype == L1_BRICKTYPE_191010) ||
(bricktype == L1_BRICKTYPE_1932))
- sprintf(address, "%s^%d", address, geo_slot(geoid));
+ sprintf(address + strlen(address), "^%d",
+ geo_slot(geoid));
}
void __devinit
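
The replaced sprintf() used the destination buffer as one of its own sources, which is undefined behaviour; the fix appends at the current end of the string instead. A standalone illustration of the two spellings:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char address[64] = "module_001";

		/* undefined: sprintf(address, "%s^%d", address, 3); */
		sprintf(address + strlen(address), "^%d", 3);	/* well-defined append */
		puts(address);	/* prints: module_001^3 */
		return 0;
	}
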
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e456f062f24..ece1bf99449 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
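
The per-CPU declaration now carries the array bound in the type argument rather than attached to the name; the per_cpu macros re-declare the variable from that type, so the name has to stay a bare token for pasting. A toy model of the same shape (DEFINE_VAR and the prefix are illustrative, not the kernel's macros):

	#include <stdio.h>

	/* the macro can only decorate the name if the full type,
	 * array bound included, arrives as the first argument */
	#define DEFINE_VAR(type, name) __typeof__(type) prefix_##name

	DEFINE_VAR(short [4], table);	/* expands to: short prefix_table[4]; */

	int main(void)
	{
		prefix_table[2] = 7;
		printf("%d\n", prefix_table[2]);
		return 0;
	}
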
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 239b3cedcf2..5bc34eac9e0 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -54,6 +54,8 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
break;
}
}
+ if (i >= ate_resource->num_ate)
+ return -1;
} else
index++; /* Try next ate */
}
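
The added check makes find_free_ate() report failure once the probe runs past the end of the ATE table, instead of falling out of the loop with a stale index. A self-contained model of such a bounds-checked scan for a run of free slots (names illustrative):

	#include <stdio.h>

	static int find_free_run(const int *used, int num, int count)
	{
		for (int i = 0; i + count <= num; ) {
			int j;
			for (j = i; j < i + count; j++)
				if (used[j])
					break;
			if (j == i + count)
				return i;	/* run of `count` free slots */
			i = j + 1;		/* restart after the conflict */
		}
		return -1;			/* table exhausted: report failure */
	}

	int main(void)
	{
		int used[8] = { 1, 0, 0, 1, 1, 1, 1, 1 };

		printf("%d\n", find_free_run(used, 8, 2));	/* 1 */
		printf("%d\n", find_free_run(used, 8, 3));	/* -1 */
		return 0;
	}
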
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index fb833269017..dbeadb9c8e2 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -133,8 +133,7 @@ consider_steal_time(unsigned long new_itm)
account_idle_ticks(blocked);
run_local_timers();
- if (rcu_pending(cpu))
- rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
+ rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
scheduler_tick();
run_posix_cpu_timers(p);