Diffstat (limited to 'include/asm-i386')
39 files changed, 428 insertions, 249 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index df4ed323aa4..55059abf9c9 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -179,7 +179,7 @@ extern void acpi_reserve_bootmem(void);
 
 extern u8 x86_acpiid_to_apicid[];
 
-#define ARCH_HAS_POWER_PDC_INIT	1
+#define ARCH_HAS_POWER_INIT	1
 
 #endif /*__KERNEL__*/
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 8c454aa58ac..d30b8571573 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -132,6 +132,11 @@ extern unsigned int nmi_watchdog;
 
 extern int disable_timer_pin_1;
 
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+#define ARCH_APICTIMER_STOPS_ON_C3	1
+
 #else /* !CONFIG_X86_LOCAL_APIC */
 static inline void lapic_shutdown(void) { }
 
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index c68557aa04b..de649d3aa2d 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -216,6 +216,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 }
 
 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
  * atomic_add_unless - add unless the number is a given value
@@ -254,4 +255,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+#include <asm-generic/atomic.h>
 #endif
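The atomic.h hunk above pairs the existing atomic_cmpxchg() with an unconditional atomic_xchg(). A minimal sketch of the hand-off idiom this enables; the names here are illustrative, not part of the patch:

	static atomic_t pending = ATOMIC_INIT(1);

	/* atomic_xchg() returns the old value, so exactly one caller can
	 * ever observe 1 here: a lock-free "claim ownership once" test. */
	static int example_try_claim(void)
	{
		return atomic_xchg(&pending, 0) == 1;
	}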
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index ddf1739dc7f..88e6ca248cd 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -43,7 +43,7 @@ static inline void set_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
@@ -60,7 +60,7 @@ static inline void __set_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__(
 		"btsl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
@@ -78,7 +78,7 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
@@ -86,7 +86,7 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__(
 		"btrl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
 #define smp_mb__before_clear_bit()	barrier()
@@ -105,7 +105,7 @@ static inline void __change_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__(
 		"btcl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
@@ -123,7 +123,7 @@ static inline void change_bit(int nr, volatile unsigned long * addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %1,%0"
-		:"=m" (ADDR)
+		:"+m" (ADDR)
 		:"Ir" (nr));
 }
@@ -142,7 +142,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -162,7 +162,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
 	__asm__(
 		"btsl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr));
 	return oldbit;
 }
@@ -182,7 +182,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -202,7 +202,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 	__asm__(
 		"btrl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr));
 	return oldbit;
 }
@@ -214,7 +214,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 	__asm__ __volatile__(
 		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -233,7 +233,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %2,%1\n\tsbbl %0,%0"
-		:"=r" (oldbit),"=m" (ADDR)
+		:"=r" (oldbit),"+m" (ADDR)
 		:"Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
 static int test_bit(int nr, const volatile void * addr);
 #endif
 
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
@@ -332,9 +332,9 @@ static inline unsigned long __ffs(unsigned long word)
  * Returns the bit-number of the first set bit, not the number of the byte
  * containing a bit.
  */
-static inline int find_first_bit(const unsigned long *addr, unsigned size)
+static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
 {
-	int x = 0;
+	unsigned x = 0;
 
 	while (x < size) {
 		unsigned long val = *addr++;
@@ -367,11 +367,7 @@ static inline unsigned long ffz(unsigned long word)
 	return word;
 }
 
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
+#define fls64(x)   generic_fls64(x)
 
 #ifdef __KERNEL__
@@ -414,6 +410,23 @@ static inline int ffs(int x)
 }
 
 /**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ */
+static inline int fls(int x)
+{
+	int r;
+
+	__asm__("bsrl %1,%0\n\t"
+		"jnz 1f\n\t"
+		"movl $-1,%0\n"
+		"1:" : "=r" (r) : "rm" (x));
+	return r+1;
+}
+
+/**
  * hweightN - returns the hamming weight of a N-bit word
  * @x: the word to weigh
  *
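The fls() added above follows the ffs() convention its comment cites: bits are numbered from 1, and 0 means "no bits set" (bsrl leaves ZF set for a zero input, so r stays -1 and r+1 yields 0). Illustrative values under that convention, not part of the patch:

	fls(0)          == 0	/* no bit set */
	fls(1)          == 1	/* bit 0 is the highest set bit */
	fls(0x80000000) == 32	/* bit 31 is the highest set bit */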
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index ea54540638d..50233e0345f 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -8,9 +8,6 @@
  *	<rreilova@ececs.uc.edu>
  *	- Channing Corn (tests & fixes),
  *	- Andrew D. Balsa (code cleanup).
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
  */
@@ -76,25 +73,7 @@ static void __init check_fpu(void)
 		return;
 	}
 
-/* Enable FXSR and company _before_ testing for FP problems. */
-	/*
-	 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
-	 */
-	if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
-		extern void __buggy_fxsr_alignment(void);
-		__buggy_fxsr_alignment();
-	}
-	if (cpu_has_fxsr) {
-		printk(KERN_INFO "Enabling fast FPU save and restore... ");
-		set_in_cr4(X86_CR4_OSFXSR);
-		printk("done.\n");
-	}
-	if (cpu_has_xmm) {
-		printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
-		set_in_cr4(X86_CR4_OSXMMEXCPT);
-		printk("done.\n");
-	}
-
+/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
 	/* Test for the divl bug.. */
 	__asm__("fninit\n\t"
 		"fldl %1\n\t"
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 849788710fe..615911e5bd2 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,6 +10,4 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
-
 #endif
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
index 2ea36dea37d..7199f7b326f 100644
--- a/include/asm-i386/cacheflush.h
+++ b/include/asm-i386/cacheflush.h
@@ -31,4 +31,8 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot);
 void kernel_map_pages(struct page *page, int numpages, int enable);
 #endif
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
 #endif /* _I386_CACHEFLUSH_H */
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index ff1187e80c3..c4ec2a4d8fd 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -69,6 +69,7 @@
 #define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
 #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
 #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5..3cbbecd7901 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
 
 struct task_struct;
 
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
 {
 	return current_thread_info()->task;
 }
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 29b851a18c6..494e73bca09 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -15,9 +15,6 @@
 #include <asm/mmu.h>
 
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-
-#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
 
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
@@ -29,6 +26,11 @@ struct Xgt_desc_struct {
 
 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
 
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+	return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
+}
+
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
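With the desc.h change above, a CPU's GDT is now reached through the descriptor it actually loaded (cpu_gdt_descr[cpu].address) rather than a per-cpu cpu_gdt_table copy. A hedged sketch of a caller; the entry index is only an example:

	/* fetch this CPU's GDT and read one descriptor out of it */
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	struct desc_struct tss_desc = gdt[GDT_ENTRY_TSS];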
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index e56c335f8ef..9cf20cacf76 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -6,6 +6,7 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <asm/scatterlist.h>
+#include <asm/bug.h>
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -20,7 +21,9 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
+	if (direction == DMA_NONE)
+		BUG();
+	WARN_ON(size == 0);
 	flush_write_buffers();
 	return virt_to_phys(ptr);
 }
@@ -29,7 +32,8 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
+	if (direction == DMA_NONE)
+		BUG();
 }
 
 static inline int
@@ -38,7 +42,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
+	if (direction == DMA_NONE)
+		BUG();
+	WARN_ON(nents == 0 || sg[0].length == 0);
 
 	for (i = 0; i < nents; i++ ) {
 		BUG_ON(!sg[i].page);
@@ -150,7 +156,7 @@ dma_get_cache_alignment(void)
 {
 	/* no easy way to get cache size on all x86, so return the
 	 * maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
+	return (1 << INTERNODE_CACHE_SHIFT);
 }
 
 #define dma_is_consistent(d)	(1)
diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h
new file mode 100644
index 00000000000..3e7dd0ab68c
--- /dev/null
+++ b/include/asm-i386/edac.h
@@ -0,0 +1,18 @@
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	unsigned long *virt_addr = va;
+	u32 i;
+
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
+#endif
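atomic_scrub() above rewrites memory through a locked read-modify-write (adding 0), which forces corrected ECC data back out to DRAM without disturbing concurrent users. A sketch of how an EDAC driver might call it; the page-based framing is an assumption for illustration:

	static void example_scrub_page(struct page *page)
	{
		/* assumes a kernel-mapped page; size must be a multiple of 4,
		 * since atomic_scrub() walks one 32-bit word at a time */
		atomic_scrub(page_address(page), PAGE_SIZE);
	}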
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index e7a271d3930..44b9db80647 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -61,7 +61,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (op == FUTEX_OP_SET)
 		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
 	else {
-#if !defined(CONFIG_X86_BSWAP) && !defined(CONFIG_UML)
+#ifndef CONFIG_X86_BSWAP
 		if (boot_cpu_data.x86 == 3)
 			ret = -ENOSYS;
 		else
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 6747006743f..152d0baa576 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
 		X86_FEATURE_FXSR,
 		"m" (tsk->thread.i387.fxsave)
 		:"memory");
-	tsk->thread_info->status &= ~TS_USEDFPU;
+	task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #define __unlazy_fpu( tsk ) do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) \
 		save_init_fpu( tsk ); \
 } while (0)
 
 #define __clear_fpu( tsk ) \
 do { \
-	if ((tsk)->thread_info->status & TS_USEDFPU) { \
+	if (task_thread_info(tsk)->status & TS_USEDFPU) { \
 		asm volatile("fnclex ; fwait"); \
-		(tsk)->thread_info->status &= ~TS_USEDFPU; \
+		task_thread_info(tsk)->status &= ~TS_USEDFPU; \
 		stts(); \
 	} \
 } while (0)
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 7babb97a02e..03233c2ab82 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -131,6 +131,11 @@ extern void iounmap(volatile void __iomem *addr);
 extern void *bt_ioremap(unsigned long offset, unsigned long size);
 extern void bt_iounmap(void *addr, unsigned long size);
 
+/* Use early IO mappings for DMI because it's initialized early */
+#define dmi_ioremap bt_ioremap
+#define dmi_iounmap bt_iounmap
+#define dmi_alloc alloc_bootmem
+
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h
index 543f7843d55..b279fe06dfe 100644
--- a/include/asm-i386/ioctl.h
+++ b/include/asm-i386/ioctl.h
@@ -1,85 +1 @@
-/* $Id: ioctl.h,v 1.5 1993/07/19 21:53:50 root Exp root $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMI386_IOCTL_H
-#define _ASMI386_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms.  The i386 ioctl numbering scheme doesn't really enforce
- * a type field.  De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here.  Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS	8
-#define _IOC_TYPEBITS	8
-#define _IOC_SIZEBITS	14
-#define _IOC_DIRBITS	2
-
-#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT	0
-#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE	0U
-#define _IOC_WRITE	1U
-#define _IOC_READ	2U
-
-#define _IOC(dir,type,nr,size) \
-	(((dir)  << _IOC_DIRSHIFT) | \
-	 ((type) << _IOC_TYPESHIFT) | \
-	 ((nr)   << _IOC_NRSHIFT) | \
-	 ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern unsigned int __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
-	((sizeof(t) == sizeof(t[1]) && \
-	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
-	  sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr)		(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr)		(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr)		(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr)		(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)
-
-#endif /* _ASMI386_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 270f1986b19..5169d7af456 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -21,8 +21,6 @@ static __inline__ int irq_canonicalize(int irq)
 	return ((irq == 2) ? 9 : irq);
 }
 
-extern void release_vm86_irqs(struct task_struct *);
-
 #ifdef CONFIG_X86_LOCAL_APIC
 # define ARCH_HAS_NMI_WATCHDOG		/* See include/linux/nmi.h */
 #endif
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 6ed2a03e37b..53f0e06672d 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -2,6 +2,8 @@
 #define _I386_KEXEC_H
 
 #include <asm/fixmap.h>
+#include <asm/ptrace.h>
+#include <asm/string.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -26,8 +28,49 @@
 #define KEXEC_ARCH KEXEC_ARCH_386
 
 #define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
 
-extern note_buf_t crash_notes[];
+/* CPU does not save ss and esp on stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+ * fixes it.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+				      struct pt_regs *oldregs)
+{
+	memcpy(newregs, oldregs, sizeof(*newregs));
+	newregs->esp = (unsigned long)&(oldregs->esp);
+	__asm__ __volatile__(
+		"xorl %%eax, %%eax\n\t"
+		"movw %%ss, %%ax\n\t"
+		:"=a"(newregs->xss));
+}
+
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic otherwise just fix up the ss and esp if coming via kernel
+ * mode exception.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	if (oldregs)
+		crash_fixup_ss_esp(newregs, oldregs);
+	else {
+		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
+		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
+		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
+		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
+		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
+		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
+		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
+		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
+		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
+		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
+		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
+		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
+		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+
+		newregs->eip = (unsigned long)current_text_addr();
+	}
+}
 
 #endif /* _I386_KEXEC_H */
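The intended calling pattern for crash_setup_regs() above, sketched with an illustrative wrapper that is not part of the patch: a panic path has no trap frame, so it passes NULL and the current register state is sampled inline; an NMI or exception path passes its pt_regs so only ss/esp need the fixup.

	static void example_crash_save_cpu(struct pt_regs *oldregs)
	{
		struct pt_regs regs;

		crash_setup_regs(&regs, oldregs);	/* oldregs may be NULL */
		/* ... 'regs' would then be emitted into the crash ELF notes ... */
	}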
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index ca916a89287..27cac050a60 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -40,6 +40,7 @@ typedef u8 kprobe_opcode_t;
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define ARCH_SUPPORTS_KRETPROBES
+#define arch_remove_kprobe(p)	do {} while (0)
 
 void kretprobe_trampoline(void);
@@ -76,14 +77,6 @@ static inline void restore_interrupts(struct pt_regs *regs)
 		local_irq_enable();
 }
 
-#ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
-#else				/* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
-					   unsigned long val, void *data)
-{
-	return 0;
-}
-#endif
 #endif /* _ASM_KPROBES_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
index ba936d4daed..18b19a77344 100644
--- a/include/asm-i386/mach-bigsmp/mach_apic.h
+++ b/include/asm-i386/mach-bigsmp/mach_apic.h
@@ -1,17 +1,10 @@
 #ifndef __ASM_MACH_APIC_H
 #define __ASM_MACH_APIC_H
-#include <asm/smp.h>
-
-#define SEQUENTIAL_APICID
-#ifdef SEQUENTIAL_APICID
-#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
-		((phys_apic<<2) & (~0xf)) )
-#elif CLUSTERED_APICID
-#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
-		((phys_apic) & (~0xf)) )
-#endif
-
-#define NO_BALANCE_IRQ (1)
+
+
+extern u8 bios_cpu_apicid[];
+
+#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
 #define esr_disable (1)
 
 static inline int apic_id_registered(void)
@@ -19,7 +12,6 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-#define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)
 /* Round robin the irqs amoung the online cpus */
 static inline cpumask_t target_cpus(void)
 {
@@ -32,29 +24,34 @@ static inline cpumask_t target_cpus(void)
 	} while (cpu >= NR_CPUS);
 	return cpumask_of_cpu(cpu);
 }
-#define TARGET_CPUS	(target_cpus())
-#define INT_DELIVERY_MODE	dest_Fixed
-#define INT_DEST_MODE	1	/* logical delivery broadcast to all procs */
+
+#undef APIC_DEST_LOGICAL
+#define APIC_DEST_LOGICAL	0
+#define TARGET_CPUS		(target_cpus())
+#define APIC_DFR_VALUE		(APIC_DFR_FLAT)
+#define INT_DELIVERY_MODE	(dest_Fixed)
+#define INT_DEST_MODE		(0)    /* phys delivery to target proc */
+#define NO_BALANCE_IRQ		(0)
+#define WAKE_SECONDARY_VIA_INIT
 
 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
 {
-	return 0;
+	return (0);
 }
 
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
+static inline unsigned long check_apicid_present(int bit)
 {
-	return 1;
+	return (1);
 }
 
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(unsigned long old)
+static inline unsigned long calculate_ldr(int cpu)
 {
-	unsigned long id;
-	id = xapic_phys_to_log_apicid(hard_smp_processor_id());
-	return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id));
+	unsigned long val, id;
+	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+	id = xapic_phys_to_log_apicid(cpu);
+	val |= SET_APIC_LOGICAL_ID(id);
+	return val;
 }
 
 /*
@@ -67,37 +64,35 @@ static inline unsigned long calculate_ldr(unsigned long old)
 static inline void init_apic_ldr(void)
 {
 	unsigned long val;
+	int cpu = smp_processor_id();
 
 	apic_write_around(APIC_DFR, APIC_DFR_VALUE);
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	val = calculate_ldr(val);
+	val = calculate_ldr(cpu);
 	apic_write_around(APIC_LDR, val);
 }
 
 static inline void clustered_apic_check(void)
 {
 	printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-		"Cluster", nr_ioapics);
+		"Physflat", nr_ioapics);
 }
 
 static inline int multi_timer_check(int apic, int irq)
 {
-	return 0;
+	return (0);
 }
 
 static inline int apicid_to_node(int logical_apicid)
 {
-	return 0;
+	return (0);
 }
 
-extern u8 bios_cpu_apicid[];
-
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < NR_CPUS)
-		return (int)bios_cpu_apicid[mps_cpu];
-	else
-		return BAD_APICID;
+		return (int) bios_cpu_apicid[mps_cpu];
+
+	return BAD_APICID;
 }
 
 static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
@@ -109,10 +104,10 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
-		return BAD_APICID;
-	return (int)cpu_2_logical_apicid[cpu];
- }
+	if (cpu >= NR_CPUS)
+		return BAD_APICID;
+	return cpu_physical_id(cpu);
+}
 
 static inline int mpc_apic_id(struct mpc_config_processor *m,
 			      struct mpc_config_translation *translation_record)
@@ -128,11 +123,9 @@ static inline int mpc_apic_id(struct mpc_config_processor *m,
 static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
-	return physids_promote(0xFUL);
+	return physids_promote(0xFFL);
 }
 
-#define WAKE_SECONDARY_VIA_INIT
-
 static inline void setup_portio_remap(void)
 {
 }
diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h
index 23e58b317c7..a58ab5a75c8 100644
--- a/include/asm-i386/mach-bigsmp/mach_apicdef.h
+++ b/include/asm-i386/mach-bigsmp/mach_apicdef.h
@@ -1,11 +1,11 @@
 #ifndef __ASM_MACH_APICDEF_H
 #define __ASM_MACH_APICDEF_H
 
-#define		APIC_ID_MASK		(0x0F<<24)
+#define		APIC_ID_MASK		(0xFF<<24)
 
 static inline unsigned get_apic_id(unsigned long x)
 {
-	return (((x)>>24)&0x0F);
+	return (((x)>>24)&0xFF);
 }
 
 #define		GET_APIC_ID(x)	get_apic_id(x)
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index cc756a67cd6..a1d0072e36b 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -15,11 +15,9 @@ static inline void __local_send_IPI_allbutself(int vector)
 {
 	if (no_broadcast) {
 		cpumask_t mask = cpu_online_map;
-		int this_cpu = get_cpu();
 
-		cpu_clear(this_cpu, mask);
+		cpu_clear(smp_processor_id(), mask);
 		send_IPI_mask(mask, vector);
-		put_cpu();
 	} else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
index 196619a8385..ba4941e6f64 100644
--- a/include/asm-i386/mman.h
+++ b/include/asm-i386/mman.h
@@ -35,6 +35,7 @@
 #define MADV_SEQUENTIAL	0x2		/* read-ahead aggressively */
 #define MADV_WILLNEED	0x3		/* pre-fault pages */
 #define MADV_DONTNEED	0x4		/* discard these pages */
+#define MADV_REMOVE	0x5		/* remove these pages & resources */
 
 /* compatibility flags */
 #define MAP_ANON	MAP_ANONYMOUS
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 620a90641ea..74f595d8057 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -76,11 +76,6 @@ static inline int pfn_to_nid(unsigned long pfn)
  * Following are macros that each numa implmentation must define.
  */
 
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)						\
 ({									\
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index eb7f2b4234a..424661d25bd 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -52,8 +52,10 @@ struct mod_arch_specific
 #define MODULE_PROC_FAMILY "CYRIXIII "
 #elif defined CONFIG_MVIAC3_2
 #define MODULE_PROC_FAMILY "VIAC3-2 "
-#elif CONFIG_MGEODEGX1
+#elif defined CONFIG_MGEODEGX1
 #define MODULE_PROC_FAMILY "GEODEGX1 "
+#elif defined CONFIG_MGEODE_LX
+#define MODULE_PROC_FAMILY "GEODE "
 #else
 #error unknown processor family
 #endif
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index a961093dbf8..76feedf85a8 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -75,7 +75,7 @@ struct mpc_config_bus
 {
 	unsigned char mpc_type;
 	unsigned char mpc_busid;
-	unsigned char mpc_bustype[6] __attribute((packed));
+	unsigned char mpc_bustype[6];
 };
 
 /* List of Bus Type string values, Intel MP Spec. */
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
new file mode 100644
index 00000000000..9b2199e829f
--- /dev/null
+++ b/include/asm-i386/mutex.h
@@ -0,0 +1,136 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ *                         from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn)				\
+do {									\
+	unsigned int dummy;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ __volatile__(						\
+		LOCK	"   decl (%%eax)	\n"			\
+			"   js 2f		\n"			\
+			"1:			\n"			\
+									\
+		LOCK_SECTION_START("")					\
+			"2: call "#fail_fn"	\n"			\
+			"   jmp 1b		\n"			\
+		LOCK_SECTION_END					\
+									\
+		:"=a" (dummy)						\
+		: "a" (count)						\
+		: "memory", "ecx", "edx");				\
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ *                                from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+			     int fastcall (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_dec_return(count) < 0))
+		return fail_fn(count);
+	else
+		return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn)				\
+do {									\
+	unsigned int dummy;						\
+									\
+	typecheck(atomic_t *, count);					\
+	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
+									\
+	__asm__ __volatile__(						\
+		LOCK	"   incl (%%eax)	\n"			\
+			"   jle 2f		\n"			\
+			"1:			\n"			\
+									\
+		LOCK_SECTION_START("")					\
+			"2: call "#fail_fn"	\n"			\
+			"   jmp 1b		\n"			\
+		LOCK_SECTION_END					\
+									\
+		:"=a" (dummy)						\
+		: "a" (count)						\
+		: "memory", "ecx", "edx");				\
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock()	1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+	/*
+	 * We have two variants here. The cmpxchg based one is the best one
+	 * because it never induce a false contention state. It is included
+	 * here because architectures using the inc/dec algorithms over the
+	 * xchg ones are much more likely to support cmpxchg natively.
+	 *
+	 * If not we fall back to the spinlock based variant - that is
+	 * just as efficient (and simpler) as a 'destructive' probing of
+	 * the mutex state would be.
+	 */
+#ifdef __HAVE_ARCH_CMPXCHG
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+		return 1;
+	return 0;
+#else
+	return fail_fn(count);
+#endif
+}
+
+#endif
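These fastpaths are meant to be composed by the generic mutex layer: one locked decl/incl in the common case, with the named fail_fn reached only on contention. A simplified sketch of that composition, assuming slowpath functions along the lines of kernel/mutex.c (names illustrative):

	void example_mutex_lock(struct mutex *lock)
	{
		/* single atomic decrement; branches out only if count was not 1 */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}

	void example_mutex_unlock(struct mutex *lock)
	{
		/* single atomic increment; the slowpath wakes any waiters */
		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
	}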
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
index fa02e67ea86..095580f3a45 100644
--- a/include/asm-i386/param.h
+++ b/include/asm-i386/param.h
@@ -1,9 +1,8 @@
-#include <linux/config.h>
-
 #ifndef _ASMi386_PARAM_H
 #define _ASMi386_PARAM_H
 
 #ifdef __KERNEL__
+# include <linux/config.h>
 # define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
 # define USER_HZ	100		/* .. some user interfaces are in "ticks" */
 # define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 5c96cf6dcb3..feca5d961e2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -61,9 +61,11 @@ struct cpuinfo_x86 {
 	int	x86_cache_size;  /* in KB - valid for CPUS which support this call  */
 	int	x86_cache_alignment;	/* In bytes */
-	int	fdiv_bug;
-	int	f00f_bug;
-	int	coma_bug;
+	char	fdiv_bug;
+	char	f00f_bug;
+	char	coma_bug;
+	char	pad0;
+	int	x86_power;
 	unsigned long loops_per_jiffy;
 	unsigned char x86_max_cores;	/* cpuid returned max cores value */
 	unsigned char booted_cores;	/* number of cores as seen by OS */
@@ -279,9 +281,11 @@ static inline void clear_in_cr4 (unsigned long mask)
 	outb((data), 0x23); \
 } while (0)
 
-static inline void serialize_cpu(void)
+/* Stop speculative execution */
+static inline void sync_core(void)
 {
-	__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+	int tmp;
+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
@@ -557,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
 	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
 })
 
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessable even if the CPU haven't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
 #define task_pt_regs(task)                                             \
 ({                                                                     \
        struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
+       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
       __regs__ - 1;                                                   \
})
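The serialize_cpu()-to-sync_core() rename above keeps cpuid as the serializing instruction but pins leaf 1 and adds a "memory" clobber. The classic use is after cross-modifying code, sketched here with illustrative names (not an API from this patch):

	static void example_patch_text(void *code_ptr, const void *new_insn, size_t len)
	{
		memcpy(code_ptr, new_insn, len);  /* rewrite the instruction bytes */
		sync_core();  /* serialize: drop anything fetched from the old bytes */
	}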
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 7e0f2945d17..f324c53b6f9 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -54,6 +54,9 @@ struct pt_regs {
 #define PTRACE_GET_THREAD_AREA    25
 #define PTRACE_SET_THREAD_AREA    26
 
+#define PTRACE_SYSEMU		  31
+#define PTRACE_SYSEMU_SINGLESTEP  32
+
 #ifdef __KERNEL__
 
 #include <asm/vm86.h>
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index bb5ff5b2c02..faf995307b9 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -91,6 +91,20 @@
 #define GDT_ENTRY_BOOT_DS		(GDT_ENTRY_BOOT_CS + 1)
 #define __BOOT_DS	(GDT_ENTRY_BOOT_DS * 8)
 
+/* The PnP BIOS entries in the GDT */
+#define GDT_ENTRY_PNPBIOS_CS32		(GDT_ENTRY_PNPBIOS_BASE + 0)
+#define GDT_ENTRY_PNPBIOS_CS16		(GDT_ENTRY_PNPBIOS_BASE + 1)
+#define GDT_ENTRY_PNPBIOS_DS		(GDT_ENTRY_PNPBIOS_BASE + 2)
+#define GDT_ENTRY_PNPBIOS_TS1		(GDT_ENTRY_PNPBIOS_BASE + 3)
+#define GDT_ENTRY_PNPBIOS_TS2		(GDT_ENTRY_PNPBIOS_BASE + 4)
+
+/* The PnP BIOS selectors */
+#define PNP_CS32   (GDT_ENTRY_PNPBIOS_CS32 * 8)	/* segment for calling fn */
+#define PNP_CS16   (GDT_ENTRY_PNPBIOS_CS16 * 8)	/* code segment for BIOS */
+#define PNP_DS     (GDT_ENTRY_PNPBIOS_DS * 8)	/* data segment for BIOS */
+#define PNP_TS1    (GDT_ENTRY_PNPBIOS_TS1 * 8)	/* transfer data segment */
+#define PNP_TS2    (GDT_ENTRY_PNPBIOS_TS2 * 8)	/* another data segment */
+
 /*
  * The interrupt descriptor table has room for 256 idt's,
  * the global descriptor table is dependent on the number
diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h
index 76524b4052a..026fd231488 100644
--- a/include/asm-i386/signal.h
+++ b/include/asm-i386/signal.h
@@ -218,7 +218,6 @@ static __inline__ int sigfindinword(unsigned long word)
 }
 
 struct pt_regs;
-extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
 
 #define ptrace_signal_deliver(regs, cookie)		\
 	do {						\
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d2206..bb5f88a27f7 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
 return __res;
 }
 
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
  * This looks ugly, but the compiler can optimize it totally,
  * as the count is constant.
  */
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
 {
 	long esi, edi;
 	if (!n) return to;
@@ -367,7 +367,7 @@ return s;
  * things 32 bits at a time even when we don't know the size of the
  * area at compile-time..
 */
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
 {
 int d0, d1;
 __asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
  * This looks horribly ugly, but the compiler can optimize it totally,
  * as we by now know that both pattern and count is constant..
 */
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
 {
 	switch (count) {
 		case 0:
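__always_inline matters for these string helpers because the header dispatches on __builtin_constant_p at compile time; if gcc declined to inline, the constant-size information would be lost and the unrolled __constant_* paths above would never be selected. In miniature, a paraphrase of the header's own memcpy dispatch (not a new API):

	#define example_memcpy(t, f, n)				\
		(__builtin_constant_p(n) ?			\
			__constant_memcpy((t), (f), (n)) :	\
			__memcpy((t), (f), (n)))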
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 772f85da120..36a92ed6a9d 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -54,23 +54,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	); } while(0)
 
 #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
-	unsigned long __base;
-	__asm__("movb %3,%%dh\n\t"
-		"movb %2,%%dl\n\t"
-		"shll $16,%%edx\n\t"
-		"movw %1,%%dx"
-		:"=&d" (__base)
-		:"m" (*((addr)+2)),
-		 "m" (*((addr)+4)),
-		 "m" (*((addr)+7)));
-	return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
 
 /*
  * Load a segment. Fall back on loading the zero
@@ -140,6 +124,19 @@ static inline unsigned long _get_base(char * addr)
 		:"=r" (__dummy)); \
 	__dummy; \
 })
+
+#define read_cr4_safe() ({			      \
+	unsigned int __dummy;			      \
+	/* This could fault if %cr4 does not exist */ \
+	__asm__("1: movl %%cr4, %0		\n"   \
+		"2:				\n"   \
+		".section __ex_table,\"a\"	\n"   \
+		".long 1b,2b			\n"   \
+		".previous			\n"   \
+		: "=r" (__dummy): "0" (0));	      \
+	__dummy;				      \
+})
+
 #define write_cr4(x) \
 	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
 #define stts() write_cr0(8 | read_cr0())
@@ -551,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
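read_cr4_safe() exists because %cr4 is absent on the oldest x86 parts, where the raw mov would fault; the __ex_table entry turns that fault into a return of the preloaded 0. Illustrative use (the feature test is just an example):

	unsigned int cr4 = read_cr4_safe();	/* 0 on CPUs without %cr4 */

	if (cr4 & X86_CR4_PAE)
		printk(KERN_INFO "PAE is enabled\n");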
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8fbf791651b..e20e99551d7 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #endif
 
 #define free_thread_info(info)	kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
@@ -142,6 +140,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
@@ -154,6 +153,7 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #define _TIF_SYSCALL_EMU	(1<<TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 /* work to do on interrupt/exception return */
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 0ec27c9e8e4..d7e19eb344b 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
 	.max_interval		= 32,			\
 	.busy_factor		= 32,			\
 	.imbalance_pct		= 125,			\
-	.cache_hot_time		= (10*1000000),		\
 	.cache_nice_tries	= 1,			\
 	.busy_idx		= 3,			\
 	.idle_idx		= 1,			\
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5a..3f1337c3420 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
 */
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
	return __copy_to_user_ll(to, from, n);
}
 
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
       might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
  * If some data could not be copied, this function will pad the copied
  * data to the requested size using zero bytes.
 */
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
	return __copy_from_user_ll(to, from, n);
}
 
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 0f92e78dfea..597496ed2ae 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -256,7 +256,7 @@
 #define __NR_io_submit		248
 #define __NR_io_cancel		249
 #define __NR_fadvise64		250
-#define __NR_set_zone_reclaim	251
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
 #define __NR_exit_group		252
 #define __NR_lookup_dcookie	253
 #define __NR_epoll_create	254
@@ -299,8 +299,24 @@
 #define __NR_inotify_init	291
 #define __NR_inotify_add_watch	292
 #define __NR_inotify_rm_watch	293
+#define __NR_migrate_pages	294
+#define __NR_openat		295
+#define __NR_mkdirat		296
+#define __NR_mknodat		297
+#define __NR_fchownat		298
+#define __NR_futimesat		299
+#define __NR_newfstatat		300
+#define __NR_unlinkat		301
+#define __NR_renameat		302
+#define __NR_linkat		303
+#define __NR_symlinkat		304
+#define __NR_readlinkat		305
+#define __NR_fchmodat		306
+#define __NR_faccessat		307
+#define __NR_pselect6		308
+#define __NR_ppoll		309
 
-#define NR_syscalls 294
+#define NR_syscalls 310
 
 /*
  * user-visible error numbers are in the range -1 - -128: see
@@ -416,6 +432,7 @@ __syscall_return(type,__res); \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
 #endif
 
 #ifdef __KERNEL_SYSCALLS__
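Until libc grows wrappers, the new syscall numbers above are reachable from userspace through syscall(2). A hedged sketch for __NR_openat; the path and flags are purely illustrative:

	#include <sys/syscall.h>
	#include <unistd.h>
	#include <fcntl.h>

	int fd = syscall(__NR_openat, AT_FDCWD, "somefile", O_RDONLY);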
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 40ec82c6914..952fd695738 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -16,7 +16,11 @@
 #define IF_MASK		0x00000200
 #define IOPL_MASK	0x00003000
 #define NT_MASK		0x00004000
+#ifdef CONFIG_VM86
 #define VM_MASK		0x00020000
+#else
+#define VM_MASK		0 /* ignored */
+#endif
 #define AC_MASK		0x00040000
 #define VIF_MASK	0x00080000	/* virtual interrupt flag */
 #define VIP_MASK	0x00100000	/* virtual interrupt pending */
@@ -200,9 +204,25 @@ struct kernel_vm86_struct {
  */
 };
 
+#ifdef CONFIG_VM86
+
 void handle_vm86_fault(struct kernel_vm86_regs *, long);
 int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
 
+struct task_struct;
+void release_vm86_irqs(struct task_struct *);
+
+#else
+
+#define handle_vm86_fault(a, b)
+#define release_vm86_irqs(a)
+
+static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) {
+	return 0;
+}
+
+#endif /* CONFIG_VM86 */
+
 #endif /* __KERNEL__ */
 
 #endif