author     Mark Brown <broonie@opensource.wolfsonmicro.com>   2009-10-06 16:01:27 +0100
committer  Mark Brown <broonie@opensource.wolfsonmicro.com>   2009-10-06 16:01:27 +0100
commit     907bc6c7fc7071b00083fc11e510e47dd93df45d (patch)
tree       0697a608561522c00da9e1814974a2eb051bb96d /arch/ia64/include/asm
parent     d2b247a8be57647d1745535acd58169fbcbe431a (diff)
parent     2a0f5cb32772e9a9560209e241a80bfbbc31dbc3 (diff)
Merge branch 'for-2.6.32' into for-2.6.33
Diffstat (limited to 'arch/ia64/include/asm')
-rw-r--r--  arch/ia64/include/asm/acpi.h             |   2
-rw-r--r--  arch/ia64/include/asm/agp.h              |   4
-rw-r--r--  arch/ia64/include/asm/bitops.h           |   2
-rw-r--r--  arch/ia64/include/asm/cputime.h          |   1
-rw-r--r--  arch/ia64/include/asm/device.h           |   3
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h      |  19
-rw-r--r--  arch/ia64/include/asm/fpu.h              |   2
-rw-r--r--  arch/ia64/include/asm/kvm_host.h         |   4
-rw-r--r--  arch/ia64/include/asm/kvm_para.h         |   4
-rw-r--r--  arch/ia64/include/asm/mca.h              |   2
-rw-r--r--  arch/ia64/include/asm/mman.h             |  14
-rw-r--r--  arch/ia64/include/asm/pci.h              |  14
-rw-r--r--  arch/ia64/include/asm/pgalloc.h          |   6
-rw-r--r--  arch/ia64/include/asm/pgtable.h          |   1
-rw-r--r--  arch/ia64/include/asm/smp.h              |   1
-rw-r--r--  arch/ia64/include/asm/socket.h           |   3
-rw-r--r--  arch/ia64/include/asm/spinlock.h         | 175
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h   |   2
-rw-r--r--  arch/ia64/include/asm/thread_info.h      |   2
-rw-r--r--  arch/ia64/include/asm/tlb.h              |  12
-rw-r--r--  arch/ia64/include/asm/topology.h         |  20
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h   |   1

22 files changed, 162 insertions(+), 132 deletions(-)
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 0f82cc2934e..91df9686a0d 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -89,10 +89,12 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq)				\
 	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))

+#ifdef CONFIG_ACPI
 #define acpi_disabled 0	/* ACPI always enabled on IA64 */
 #define acpi_noirq 0	/* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+#endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
index c11fdd8ab4d..01d09c401c5 100644
--- a/arch/ia64/include/asm/agp.h
+++ b/arch/ia64/include/asm/agp.h
@@ -17,10 +17,6 @@
 #define unmap_page_from_agp(page)	/* nothing */
 #define flush_agp_cache()		mb()

-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order)		\
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index e2ca8003733..57a2787bc9f 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -286,7 +286,7 @@ __test_and_clear_bit(int nr, volatile void * addr)
 {
 	__u32 *p = (__u32 *) addr + (nr >> 5);
 	__u32 m = 1 << (nr & 31);
-	int oldbitset = *p & m;
+	int oldbitset = (*p & m) != 0;

 	*p &= ~m;
 	return oldbitset;
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index d20b998cb91..7fa8a859466 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -30,6 +30,7 @@ typedef u64 cputime_t;
 typedef u64 cputime64_t;

 #define cputime_zero			((cputime_t)0)
+#define cputime_one_jiffy		jiffies_to_cputime(1)
 #define cputime_max			((~((cputime_t)0) >> 1) - 1)
 #define cputime_add(__a, __b)		((__a) + (__b))
 #define cputime_sub(__a, __b)		((__a) - (__b))
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 41ab85d66f3..d66d446b127 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -15,4 +15,7 @@ struct dev_archdata {
 #endif
 };

+struct pdev_archdata {
+};
+
 #endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18..8d3c79cd81e 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

 #define get_dma_ops(dev) platform_dma_get_ops(dev)
-#define flush_write_buffers()

 #include <asm-generic/dma-mapping-common.h>

@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
 	return 0;
 }

+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
 extern int dma_get_cache_alignment(void);

 static inline void
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
index 0c26157cffa..b6395ad1500 100644
--- a/arch/ia64/include/asm/fpu.h
+++ b/arch/ia64/include/asm/fpu.h
@@ -6,6 +6,8 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */

+#include <linux/types.h>
+
 /* floating point status register: */
 #define FPSR_TRAP_VD	(1 << 0)	/* invalid op trap disabled */
 #define FPSR_TRAP_DD	(1 << 1)	/* denormal trap disabled */
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 5f43697aed3..d9b6325a932 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -235,7 +235,8 @@ struct kvm_vm_data {
 #define KVM_REQ_PTC_G		32
 #define KVM_REQ_RESUME		33

-#define KVM_PAGES_PER_HPAGE	1
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	1

 struct kvm;
 struct kvm_vcpu;
@@ -465,7 +466,6 @@ struct kvm_arch {
 	unsigned long	metaphysical_rr4;
 	unsigned long	vmm_init_rr;

-	int		online_vcpus;
 	int		is_sn2;

 	struct kvm_ioapic *vioapic;
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 0d6d8ca07b8..1588aee781a 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -19,9 +19,13 @@
  *
  */

+#ifdef __KERNEL__
+
 static inline unsigned int kvm_arch_para_features(void)
 {
 	return 0;
 }

 #endif
+
+#endif
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index 44a0b53df90..c171cdf0a78 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -145,12 +145,14 @@ extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *);
 extern void ia64_init_handler(struct pt_regs *,
 			      struct switch_stack *,
 			      struct ia64_sal_os_state *);
+extern void ia64_os_init_on_kdump(void);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
 extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
 extern unsigned long ia64_get_rnat(unsigned long *);
+extern void ia64_set_psr_mc(void);

 extern void ia64_mca_printk(const char * fmt, ...)
 	 __attribute__ ((format (printf, 1, 2)));
diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h
index 48cf8b98a0b..4459028e5aa 100644
--- a/arch/ia64/include/asm/mman.h
+++ b/arch/ia64/include/asm/mman.h
@@ -8,19 +8,9 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
  */

-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman.h>

-#define MAP_GROWSDOWN	0x00100	/* stack-like segment */
-#define MAP_GROWSUP	0x00200	/* register stack-like segment */
-#define MAP_DENYWRITE	0x00800	/* ETXTBSY */
-#define MAP_EXECUTABLE	0x01000	/* mark it as an executable */
-#define MAP_LOCKED	0x02000	/* pages are locked */
-#define MAP_NORESERVE	0x04000	/* don't check for reservations */
-#define MAP_POPULATE	0x08000	/* populate (prefault) pagetables */
-#define MAP_NONBLOCK	0x10000	/* do not block on IO */
-
-#define MCL_CURRENT	1	/* lock all current mappings */
-#define MCL_FUTURE	2	/* lock all future mappings */
+#define MAP_GROWSUP	0x0200	/* register stack-like segment */

 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index fcfca56bb85..55281aabe5f 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -17,7 +17,6 @@
  * loader.
  */
 #define pcibios_assign_all_busses()     0
-#define pcibios_scan_all_fns(a, b)	0

 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
@@ -135,7 +134,18 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
 extern void pcibios_bus_to_resource(struct pci_dev *dev,
 		struct resource *res, struct pci_bus_region *region);

-#define pcibios_scan_all_fns(a, b)	0
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+	struct resource *root = NULL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &ioport_resource;
+	if (res->flags & IORESOURCE_MEM)
+		root = &iomem_resource;
+
+	return root;
+}

 #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index b9ac1a6fc21..96a8d927db2 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
 	quicklist_free(0, NULL, pud);
 }
-#define __pud_free_tlb(tlb, pud)	pud_free((tlb)->mm, pud)
+#define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
 #endif /* CONFIG_PGTABLE_4 */

 static inline void
@@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	quicklist_free(0, NULL, pmd);
 }

-#define __pmd_free_tlb(tlb, pmd)	pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)

 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
 	quicklist_trim(0, NULL, 25, 16);
 }

-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)

 #endif /* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 0a9cc73d35c..8840a690d1e 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -155,7 +155,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
-#include <asm/processor.h>

 /*
  * Next come the mappings that determine how mmap() protection bits
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index d217d1d4e05..0b3b3997dec 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);

 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask

 #else /* CONFIG_SMP */

diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 745421225ec..0b0d5ff062e 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -66,4 +66,7 @@
 #define SO_TIMESTAMPING	37
 #define SCM_TIMESTAMPING	SO_TIMESTAMPING

+#define SO_PROTOCOL	38
+#define SO_DOMAIN	39
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 13ab71576bc..30bb930e111 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -19,103 +19,106 @@

 #define __raw_spin_lock_init(x)			((x)->lock = 0)

-#ifdef ASM_SUPPORTED
 /*
- * Try to get the lock.  If we fail to get the lock, make a non-standard call to
- * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of __raw_spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourself to the queue and noting our position), then waiting until the head
+ * becomes equal to the the initial value of the tail.
+ *
+ *   63                     32  31                      0
+ *  +----------------------------------------------------+
+ *  |  next_ticket_number      |  now_serving            |
+ *  +----------------------------------------------------+
  */

-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
+#define TICKET_SHIFT	32

-static inline void
-__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
+static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-# ifdef CONFIG_ITANIUM
-	/* don't use brl on Itanium... */
-	asm volatile ("{\n\t"
-		      "  mov ar.ccv = r0\n\t"
-		      "  mov r28 = ip\n\t"
-		      "  mov r30 = 1;;\n\t"
-		      "}\n\t"
-		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
-		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "mov b6 = r29;;\n\t"
-		      "mov r27=%2\n\t"
-		      "(p14) br.cond.spnt.many b6"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-	asm volatile ("{\n\t"
-		      "  mov ar.ccv = r0\n\t"
-		      "  mov r28 = ip\n\t"
-		      "  mov r30 = 1;;\n\t"
-		      "}\n\t"
-		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "mov r27=%2\n\t"
-		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#else
-# ifdef CONFIG_ITANIUM
-	/* don't use brl on Itanium... */
-	/* mis-declare, so we get the entry-point, not it's function descriptor: */
-	asm volatile ("mov r30 = 1\n\t"
-		      "mov r27=%2\n\t"
-		      "mov ar.ccv = r0;;\n\t"
-		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
-		      "movl r29 = ia64_spinlock_contention;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "mov b6 = r29;;\n\t"
-		      "(p14) br.call.spnt.many b6 = b6"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-	asm volatile ("mov r30 = 1\n\t"
-		      "mov r27=%2\n\t"
-		      "mov ar.ccv = r0;;\n\t"
-		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#endif
+	int	*p = (int *)&lock->lock, turn, now_serving;
+
+	now_serving = *p;
+	turn = ia64_fetchadd(1, p+1, acq);
+
+	if (turn == now_serving)
+		return;
+
+	do {
+		cpu_relax();
+	} while (ACCESS_ONCE(*p) != turn);
 }

-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
+static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+{
+	long tmp = ACCESS_ONCE(lock->lock), try;

-/* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void __raw_spin_unlock(raw_spinlock_t *x) {
-	barrier();
-	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
+		try = tmp + (1L << TICKET_SHIFT);
+
+		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
+	}
+	return 0;
 }

-#else /* !ASM_SUPPORTED */
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-# define __raw_spin_lock(x)							\
-do {										\
-	__u32 *ia64_spinlock_ptr = (__u32 *) (x);				\
-	__u64 ia64_spinlock_val;						\
-	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);		\
-	if (unlikely(ia64_spinlock_val)) {					\
-		do {								\
-			while (*ia64_spinlock_ptr)				\
-				ia64_barrier();					\
-			ia64_spinlock_val =					\
-				ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
-		} while (ia64_spinlock_val);					\
-	}									\
-} while (0)
-#define __raw_spin_unlock(x)	do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0)
-#endif /* !ASM_SUPPORTED */
+static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock;
+
+	(void)ia64_fetchadd(1, p, rel);
+}
+
+static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+{
+	long tmp = ACCESS_ONCE(lock->lock);
+
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+}
+
+static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+{
+	long tmp = ACCESS_ONCE(lock->lock);

-#define __raw_spin_is_locked(x)		((x)->lock != 0)
-#define __raw_spin_trylock(x)		(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+}
+
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_locked(lock);
+}
+
+static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+{
+	return __ticket_spin_is_contended(lock);
+}
+#define __raw_spin_is_contended	__raw_spin_is_contended
+
+static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	__ticket_spin_lock(lock);
+}
+
+static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return __ticket_spin_trylock(lock);
+}
+
+static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__ticket_spin_unlock(lock);
+}
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+						  unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}

 #define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
 #define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4..b61d136d9bc 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,7 +6,7 @@
 #endif

 typedef struct {
-	volatile unsigned int lock;
+	volatile unsigned long lock;
 } raw_spinlock_t;

 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ae6922626bf..8ce2e388e37 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,7 +48,7 @@ struct thread_info {
 	.flags		= 0,			\
 	.cpu		= 0,			\
 	.addr_limit	= KERNEL_DS,		\
-	.preempt_count	= 0,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
 	.restart_block	= {			\
 		.fn = do_no_restart_syscall,	\
 	},					\
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 20d8a39680c..85d965cb19a 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -236,22 +236,22 @@ do {							\
 	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
 } while (0)

-#define pte_free_tlb(tlb, ptep)				\
+#define pte_free_tlb(tlb, ptep, address)		\
 do {							\
 	tlb->need_flush = 1;				\
-	__pte_free_tlb(tlb, ptep);			\
+	__pte_free_tlb(tlb, ptep, address);		\
 } while (0)

-#define pmd_free_tlb(tlb, ptep)				\
+#define pmd_free_tlb(tlb, ptep, address)		\
 do {							\
 	tlb->need_flush = 1;				\
-	__pmd_free_tlb(tlb, ptep);			\
+	__pmd_free_tlb(tlb, ptep, address);		\
 } while (0)

-#define pud_free_tlb(tlb, pudp)				\
+#define pud_free_tlb(tlb, pudp, address)		\
 do {							\
 	tlb->need_flush = 1;				\
-	__pud_free_tlb(tlb, pudp);			\
+	__pud_free_tlb(tlb, pudp, address);		\
 } while (0)

 #endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 7b4c8c70b2d..3ddb4e709db 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,6 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
 #define cpumask_of_node(node) (&node_to_cpu_mask[node])

 /*
@@ -61,12 +60,13 @@ void build_cpu_to_node_map(void);
 	.cache_nice_tries	= 2,			\
 	.busy_idx		= 2,			\
 	.idle_idx		= 1,			\
-	.newidle_idx		= 2,			\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= 1,			\
+	.newidle_idx		= 0,			\
+	.wake_idx		= 0,			\
+	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
+				| SD_BALANCE_FORK	\
 				| SD_WAKE_AFFINE,	\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
@@ -85,14 +85,14 @@ void build_cpu_to_node_map(void);
 	.cache_nice_tries	= 2,			\
 	.busy_idx		= 3,			\
 	.idle_idx		= 2,			\
-	.newidle_idx		= 2,			\
-	.wake_idx		= 1,			\
-	.forkexec_idx		= 1,			\
+	.newidle_idx		= 0,			\
+	.wake_idx		= 0,			\
+	.forkexec_idx		= 0,			\
 	.flags			= SD_LOAD_BALANCE	\
+				| SD_BALANCE_NEWIDLE	\
 				| SD_BALANCE_EXEC	\
 				| SD_BALANCE_FORK	\
-				| SD_SERIALIZE		\
-				| SD_WAKE_BALANCE,	\
+				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 64,			\
 	.nr_balance_failed	= 0,			\
@@ -103,8 +103,6 @@ void build_cpu_to_node_map(void);
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable()				(smp_num_siblings > 1)
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index e425227a418..88afb54501e 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -33,6 +33,7 @@
 #ifndef _ASM_IA64_XEN_HYPERVISOR_H
 #define _ASM_IA64_XEN_HYPERVISOR_H

+#include <linux/err.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>	/* to compile feature.c */
 #include <xen/features.h>		/* to comiple xen-netfront.c */
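
The rewritten spinlock.h comment above describes the ticket-lock scheme: next_ticket_number lives in the upper 32 bits of the lock word and now_serving in the lower 32. As a rough illustration of the same head/tail algorithm, here is a minimal user-space C sketch. It is not the kernel code: it substitutes GCC's __atomic builtins for ia64_fetchadd()/ia64_cmpxchg(), the union layout assumes a little-endian machine (mirroring the kernel's (int *) aliasing of the lock word on ia64), and the ticket_lock_t/ticket_lock()/ticket_trylock()/ticket_unlock() names are invented for this example.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TICKET_SHIFT 32

/* Lock word layout, as in the comment above:
 *   63            32 31            0
 *  | next_ticket    | now_serving  |
 */
typedef union {
	volatile uint64_t word;
	struct {
		volatile uint32_t now_serving;	/* low half (little-endian) */
		volatile uint32_t next_ticket;	/* high half */
	} t;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* Atomically take a ticket: fetch-and-increment the tail. */
	uint32_t ticket = __atomic_fetch_add(&l->t.next_ticket, 1,
					     __ATOMIC_ACQUIRE);

	/* Spin until the head (now_serving) reaches our ticket. */
	while (__atomic_load_n(&l->t.now_serving, __ATOMIC_ACQUIRE) != ticket)
		;	/* the kernel calls cpu_relax() here */
}

static bool ticket_trylock(ticket_lock_t *l)
{
	uint64_t tmp = __atomic_load_n(&l->word, __ATOMIC_RELAXED);

	/* Free only when head == tail, i.e. the two halves are equal;
	 * this is the ((tmp >> TICKET_SHIFT) ^ tmp) test in the patch. */
	if (((tmp >> TICKET_SHIFT) ^ tmp) & 0xffffffffULL)
		return false;

	/* Claim the next ticket in a single compare-and-swap. */
	uint64_t try = tmp + (1ULL << TICKET_SHIFT);
	return __atomic_compare_exchange_n(&l->word, &tmp, try, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void ticket_unlock(ticket_lock_t *l)
{
	/* Advance only the 32-bit head, handing the lock to the next
	 * waiter in FIFO order. */
	__atomic_fetch_add(&l->t.now_serving, 1, __ATOMIC_RELEASE);
}

int main(void)
{
	ticket_lock_t l = { 0 };

	ticket_lock(&l);
	printf("trylock while held: %d (expect 0)\n", ticket_trylock(&l));
	ticket_unlock(&l);
	printf("trylock when free:  %d (expect 1)\n", ticket_trylock(&l));
	ticket_unlock(&l);
	return 0;
}
```

Note the unlock path: it increments only the 32-bit now_serving half, matching the patch's ia64_fetchadd(1, p, rel) on an int *. Adding 1 to the full 64-bit word would look equivalent but would carry into next_ticket_number once now_serving wraps around.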