author    | Jeff Garzik <jeff@garzik.org> | 2006-09-17 01:06:01 -0400
committer | Jeff Garzik <jeff@garzik.org> | 2006-09-17 01:06:01 -0400
commit    | f50e5ea14baa321c5f0ff71707a83b02bf5b9398 (patch)
tree      | 2019c5bc11d99a353f3274853e913f489ed103ce /include
parent    | ecf7f354e228ac9e80d2d25a0a0cbc8c876e9ab4 (diff)
parent    | 803db244b9f71102e366fd689000c1417b9a7508 (diff)
Merge branch 'upstream-fixes' into upstream
Diffstat (limited to 'include')
29 files changed, 192 insertions, 116 deletions
diff --git a/include/asm-alpha/Kbuild b/include/asm-alpha/Kbuild index e57fd57538b..2b06b3bad5f 100644 --- a/include/asm-alpha/Kbuild +++ b/include/asm-alpha/Kbuild @@ -1,5 +1,5 @@ include include/asm-generic/Kbuild.asm -unifdef-y += console.h fpu.h sysinfo.h +unifdef-y += console.h fpu.h sysinfo.h compiler.h header-y += gentrap.h regdef.h pal.h reg.h diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h index 00c6f57ad9a..d2768cc3d7a 100644 --- a/include/asm-alpha/compiler.h +++ b/include/asm-alpha/compiler.h @@ -90,6 +90,7 @@ __asm__("stw %1,%0" : "=m"(mem) : "r"(val)) #endif +#ifdef __KERNEL__ /* Some idiots over in <linux/compiler.h> thought inline should imply always_inline. This breaks stuff. We'll include this file whenever we run into such problems. */ @@ -101,4 +102,6 @@ #undef __always_inline #define __always_inline inline __attribute__((always_inline)) +#endif /* __KERNEL__ */ + #endif /* __ALPHA_COMPILER_H */ diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h index 8c7cd50d4ea..d2bed3cb33f 100644 --- a/include/asm-alpha/page.h +++ b/include/asm-alpha/page.h @@ -1,6 +1,8 @@ #ifndef _ALPHA_PAGE_H #define _ALPHA_PAGE_H +#ifdef __KERNEL__ + #include <asm/pal.h> /* PAGE_SHIFT determines the page size */ @@ -8,8 +10,6 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) -#ifdef __KERNEL__ - #ifndef __ASSEMBLY__ #define STRICT_MM_TYPECHECKS @@ -92,9 +92,9 @@ typedef unsigned long pgprot_t; #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#endif /* __KERNEL__ */ #include <asm-generic/memory_model.h> #include <asm-generic/page.h> +#endif /* __KERNEL__ */ #endif /* _ALPHA_PAGE_H */ diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm index 6b16dda1811..c00de6028fa 100644 --- a/include/asm-generic/Kbuild.asm +++ b/include/asm-generic/Kbuild.asm @@ -2,7 +2,7 @@ unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \ ioctls.h ipcbuf.h mman.h msgbuf.h param.h poll.h \ posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \ sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \ - statfs.h termbits.h termios.h timex.h types.h unistd.h user.h + statfs.h termbits.h termios.h types.h unistd.h user.h # These probably shouldn't be exported unifdef-y += elf.h page.h diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild index 335b2fa4e06..2308190321d 100644 --- a/include/asm-i386/Kbuild +++ b/include/asm-i386/Kbuild @@ -1,5 +1,5 @@ include include/asm-generic/Kbuild.asm -header-y += boot.h debugreg.h ldt.h setup.h ucontext.h +header-y += boot.h debugreg.h ldt.h ucontext.h -unifdef-y += mtrr.h vm86.h +unifdef-y += mtrr.h setup.h vm86.h diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h index 1eac92cb5b1..db4344d9f73 100644 --- a/include/asm-i386/elf.h +++ b/include/asm-i386/elf.h @@ -7,10 +7,7 @@ #include <asm/ptrace.h> #include <asm/user.h> -#include <asm/processor.h> -#include <asm/system.h> /* for savesegment */ #include <asm/auxvec.h> -#include <asm/desc.h> #include <linux/utsname.h> @@ -48,6 +45,12 @@ typedef struct user_fxsr_struct elf_fpxregset_t; #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_386 +#ifdef __KERNEL__ + +#include <asm/processor.h> +#include <asm/system.h> /* for savesegment */ +#include <asm/desc.h> + /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. 
This provides a mean for the dynamic linker to call DT_FINI functions for @@ -111,7 +114,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t; #define ELF_PLATFORM (system_utsname.machine) -#ifdef __KERNEL__ #define SET_PERSONALITY(ex, ibcs2) do { } while (0) /* diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h index f737e423029..2734909eff8 100644 --- a/include/asm-i386/setup.h +++ b/include/asm-i386/setup.h @@ -6,6 +6,7 @@ #ifndef _i386_SETUP_H #define _i386_SETUP_H +#ifdef __KERNEL__ #include <linux/pfn.h> /* @@ -13,6 +14,7 @@ */ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) +#endif #define PARAM_SIZE 4096 #define COMMAND_LINE_SIZE 256 diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h index 3824a502351..c3e8adec591 100644 --- a/include/asm-i386/signal.h +++ b/include/asm-i386/signal.h @@ -2,7 +2,6 @@ #define _ASMi386_SIGNAL_H #include <linux/types.h> -#include <linux/linkage.h> #include <linux/time.h> #include <linux/compiler.h> @@ -10,6 +9,9 @@ struct siginfo; #ifdef __KERNEL__ + +#include <linux/linkage.h> + /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. */ diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index d983b74e4d9..fc1c8ddae14 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h @@ -324,6 +324,8 @@ #define __NR_vmsplice 316 #define __NR_move_pages 317 +#ifdef __KERNEL__ + #define NR_syscalls 318 /* @@ -423,8 +425,6 @@ __asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \ __syscall_return(type,__res); \ } -#ifdef __KERNEL__ - #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild index 85d6f8005eb..f1cb00f39c2 100644 --- a/include/asm-ia64/Kbuild +++ b/include/asm-ia64/Kbuild @@ -4,4 +4,4 @@ header-y += break.h fpu.h fpswa.h gcc_intrin.h ia64regs.h \ intel_intrin.h intrinsics.h perfmon_default_smpl.h \ ptrace_offsets.h rse.h setup.h ucontext.h -unifdef-y += perfmon.h +unifdef-y += perfmon.h ustack.h diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index f5a949ec6e1..947cb72b520 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -7,6 +7,7 @@ * David Mosberger-Tang <davidm@hpl.hp.com> */ +# ifdef __KERNEL__ #include <asm/intrinsics.h> #include <asm/types.h> @@ -64,7 +65,6 @@ # define __pa(x) ((x) - PAGE_OFFSET) # define __va(x) ((x) + PAGE_OFFSET) #else /* !__ASSEMBLY */ -# ifdef __KERNEL__ # define STRICT_MM_TYPECHECKS extern void clear_page (void *page); @@ -174,7 +174,6 @@ get_order (unsigned long size) return order; } -# endif /* __KERNEL__ */ #endif /* !__ASSEMBLY__ */ #ifdef STRICT_MM_TYPECHECKS @@ -228,4 +227,5 @@ get_order (unsigned long size) (((current->personality & READ_IMPLIES_EXEC) != 0) \ ? 
VM_EXEC : 0)) +# endif /* __KERNEL__ */ #endif /* _ASM_IA64_PAGE_H */ diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index 415abb23b21..1414316efd4 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h @@ -56,6 +56,8 @@ #include <asm/fpu.h> + +#ifdef __KERNEL__ #ifndef ASM_OFFSETS_C #include <asm/asm-offsets.h> #endif @@ -79,10 +81,9 @@ #define KERNEL_STACK_SIZE IA64_STK_OFFSET -#ifndef __ASSEMBLY__ +#endif /* __KERNEL__ */ -#include <asm/current.h> -#include <asm/page.h> +#ifndef __ASSEMBLY__ /* * This struct defines the way the registers are saved on system @@ -229,6 +230,9 @@ struct switch_stack { #ifdef __KERNEL__ +#include <asm/current.h> +#include <asm/page.h> + #define __ARCH_SYS_PTRACE 1 /* diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h index da55c91246e..a349467913e 100644 --- a/include/asm-ia64/ustack.h +++ b/include/asm-ia64/ustack.h @@ -5,12 +5,15 @@ * Constants for the user stack size */ +#ifdef __KERNEL__ #include <asm/page.h> /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) -/* Make a default stack size of 2GB */ -#define DEFAULT_USER_STACK_SIZE (1UL << 31) #define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) +#endif + +/* Make a default stack size of 2GiB */ +#define DEFAULT_USER_STACK_SIZE (1UL << 31) #endif /* _ASM_IA64_USTACK_H */ diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index 6ed1151a05a..219d359861f 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h @@ -14,8 +14,6 @@ #include <spaces.h> -#endif - /* * PAGE_SHIFT determines the page size */ @@ -34,8 +32,6 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) - -#ifdef __KERNEL__ #ifndef __ASSEMBLY__ extern void clear_page(void * page); @@ -168,8 +164,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) -#endif /* defined (__KERNEL__) */ - #ifdef CONFIG_LIMITED_DMA #define WANT_PAGE_VIRTUAL #endif @@ -177,4 +171,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; #include <asm-generic/memory_model.h> #include <asm-generic/page.h> +#endif /* defined (__KERNEL__) */ + #endif /* _ASM_PAGE_H */ diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h index 4df3e80118f..6a784396660 100644 --- a/include/asm-powerpc/eeh.h +++ b/include/asm-powerpc/eeh.h @@ -205,6 +205,7 @@ static inline void eeh_memset_io(volatile void __iomem *addr, int c, lc |= lc << 8; lc |= lc << 16; + __asm__ __volatile__ ("sync" : : : "memory"); while(n && !EEH_CHECK_ALIGN(p, 4)) { *((volatile u8 *)p) = c; p++; @@ -229,6 +230,7 @@ static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *sr void *destsave = dest; unsigned long nsave = n; + __asm__ __volatile__ ("sync" : : : "memory"); while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) { *((u8 *)dest) = *((volatile u8 *)vsrc); __asm__ __volatile__ ("eieio" : : : "memory"); @@ -266,6 +268,7 @@ static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src, { void *vdest = (void __force *) dest; + __asm__ __volatile__ ("sync" : : : "memory"); while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) { *((volatile u8 *)vdest) = *((u8 *)src); src++; diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h index f1b3c00bc1c..936422e5489 100644 --- a/include/asm-powerpc/futex.h +++ 
b/include/asm-powerpc/futex.h @@ -84,7 +84,33 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - return -ENOSYS; + int prev; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + return -EFAULT; + + __asm__ __volatile__ ( + LWSYNC_ON_SMP +"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ + cmpw 0,%0,%3\n\ + bne- 3f\n" + PPC405_ERR77(0,%2) +"2: stwcx. %4,0,%2\n\ + bne- 1b\n" + ISYNC_ON_SMP +"3: .section .fixup,\"ax\"\n\ +4: li %0,%5\n\ + b 3b\n\ + .previous\n\ + .section __ex_table,\"a\"\n\ + .align 3\n\ + " PPC_LONG "1b,4b,2b,4b\n\ + .previous" \ + : "=&r" (prev), "+m" (*uaddr) + : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) + : "cc", "memory"); + + return prev; } #endif /* __KERNEL__ */ diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h index 36c4c34bf56..212428db0d8 100644 --- a/include/asm-powerpc/io.h +++ b/include/asm-powerpc/io.h @@ -19,6 +19,7 @@ extern int check_legacy_ioport(unsigned long base_port); #include <linux/compiler.h> #include <asm/page.h> #include <asm/byteorder.h> +#include <asm/paca.h> #ifdef CONFIG_PPC_ISERIES #include <asm/iseries/iseries_io.h> #endif @@ -162,7 +163,11 @@ extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns); extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl); extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl); -#define mmiowb() +static inline void mmiowb(void) +{ + __asm__ __volatile__ ("sync" : : : "memory"); + get_paca()->io_sync = 0; +} /* * output pause versions need a delay at least for the @@ -278,22 +283,23 @@ static inline int in_8(const volatile unsigned char __iomem *addr) { int ret; - __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync" + __asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync" : "=r" (ret) : "m" (*addr)); return ret; } static inline void out_8(volatile unsigned char __iomem *addr, int val) { - __asm__ __volatile__("stb%U0%X0 %1,%0; sync" + __asm__ __volatile__("sync; stb%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); + get_paca()->io_sync = 1; } static inline int in_le16(const volatile unsigned short __iomem *addr) { int ret; - __asm__ __volatile__("lhbrx %0,0,%1; twi 0,%0,0; isync" + __asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync" : "=r" (ret) : "r" (addr), "m" (*addr)); return ret; } @@ -302,28 +308,30 @@ static inline int in_be16(const volatile unsigned short __iomem *addr) { int ret; - __asm__ __volatile__("lhz%U1%X1 %0,%1; twi 0,%0,0; isync" + __asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync" : "=r" (ret) : "m" (*addr)); return ret; } static inline void out_le16(volatile unsigned short __iomem *addr, int val) { - __asm__ __volatile__("sthbrx %1,0,%2; sync" + __asm__ __volatile__("sync; sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); + get_paca()->io_sync = 1; } static inline void out_be16(volatile unsigned short __iomem *addr, int val) { - __asm__ __volatile__("sth%U0%X0 %1,%0; sync" + __asm__ __volatile__("sync; sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); + get_paca()->io_sync = 1; } static inline unsigned in_le32(const volatile unsigned __iomem *addr) { unsigned ret; - __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync" + __asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync" : "=r" (ret) : "r" (addr), "m" (*addr)); return ret; } @@ -332,21 +340,23 @@ static inline unsigned in_be32(const volatile unsigned __iomem *addr) { unsigned ret; - __asm__ 
__volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync" + __asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync" : "=r" (ret) : "m" (*addr)); return ret; } static inline void out_le32(volatile unsigned __iomem *addr, int val) { - __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr) + __asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); + get_paca()->io_sync = 1; } static inline void out_be32(volatile unsigned __iomem *addr, int val) { - __asm__ __volatile__("stw%U0%X0 %1,%0; sync" + __asm__ __volatile__("sync; stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); + get_paca()->io_sync = 1; } static inline unsigned long in_le64(const volatile unsigned long __iomem *addr) @@ -354,6 +364,7 @@ static inline unsigned long in_le64(const volatile unsigned long __iomem *addr) unsigned long tmp, ret; __asm__ __volatile__( + "sync\n" "ld %1,0(%2)\n" "twi 0,%1,0\n" "isync\n" @@ -372,7 +383,7 @@ static inline unsigned long in_be64(const volatile unsigned long __iomem *addr) { unsigned long ret; - __asm__ __volatile__("ld%U1%X1 %0,%1; twi 0,%0,0; isync" + __asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync" : "=r" (ret) : "m" (*addr)); return ret; } @@ -389,14 +400,16 @@ static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long "rldicl %1,%1,32,0\n" "rlwimi %0,%1,8,8,31\n" "rlwimi %0,%1,24,16,23\n" - "std %0,0(%3)\n" - "sync" + "sync\n" + "std %0,0(%3)" : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr)); + get_paca()->io_sync = 1; } static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val) { - __asm__ __volatile__("std%U0%X0 %1,%0; sync" : "=m" (*addr) : "r" (val)); + __asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); + get_paca()->io_sync = 1; } #ifndef CONFIG_PPC_ISERIES diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h index dc1574c945f..10e8eb1e6f4 100644 --- a/include/asm-powerpc/kdump.h +++ b/include/asm-powerpc/kdump.h @@ -7,7 +7,7 @@ /* How many bytes to reserve at zero for kdump. The reserve limit should * be greater or equal to the trampoline's end address. * Reserve to the end of the FWNMI area, see head_64.S */ -#define KDUMP_RESERVE_LIMIT 0x8000 +#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */ #ifdef CONFIG_CRASH_DUMP diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h index 2d4585f0620..3d5d590bc4b 100644 --- a/include/asm-powerpc/paca.h +++ b/include/asm-powerpc/paca.h @@ -93,6 +93,7 @@ struct paca_struct { u64 saved_r1; /* r1 save for RTAS calls */ u64 saved_msr; /* MSR saved here by enter_rtas */ u8 proc_enabled; /* irq soft-enable flag */ + u8 io_sync; /* writel() needs spin_unlock sync */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h index 895cb6d3a42..c31e4382a77 100644 --- a/include/asm-powerpc/spinlock.h +++ b/include/asm-powerpc/spinlock.h @@ -36,6 +36,19 @@ #define LOCK_TOKEN 1 #endif +#if defined(CONFIG_PPC64) && defined(CONFIG_SMP) +#define CLEAR_IO_SYNC (get_paca()->io_sync = 0) +#define SYNC_IO do { \ + if (unlikely(get_paca()->io_sync)) { \ + mb(); \ + get_paca()->io_sync = 0; \ + } \ + } while (0) +#else +#define CLEAR_IO_SYNC +#define SYNC_IO +#endif + /* * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. 
@@ -61,6 +74,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock) { + CLEAR_IO_SYNC; return __spin_trylock(lock) == 0; } @@ -91,6 +105,7 @@ extern void __rw_yield(raw_rwlock_t *lock); static void __inline__ __raw_spin_lock(raw_spinlock_t *lock) { + CLEAR_IO_SYNC; while (1) { if (likely(__spin_trylock(lock) == 0)) break; @@ -107,6 +122,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long { unsigned long flags_dis; + CLEAR_IO_SYNC; while (1) { if (likely(__spin_trylock(lock) == 0)) break; @@ -124,6 +140,7 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) { + SYNC_IO; __asm__ __volatile__("# __raw_spin_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); lock->slock = 0; diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 89c6f1bc3aa..680555be22e 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -63,7 +63,7 @@ extern inline int in_8(const volatile unsigned char __iomem *addr) int ret; __asm__ __volatile__( - "lbz%U1%X1 %0,%1;\n" + "sync; lbz%U1%X1 %0,%1;\n" "twi 0,%0,0;\n" "isync" : "=r" (ret) : "m" (*addr)); return ret; @@ -78,7 +78,7 @@ extern inline int in_le16(const volatile unsigned short __iomem *addr) { int ret; - __asm__ __volatile__("lhbrx %0,0,%1;\n" + __asm__ __volatile__("sync; lhbrx %0,0,%1;\n" "twi 0,%0,0;\n" "isync" : "=r" (ret) : "r" (addr), "m" (*addr)); @@ -89,7 +89,7 @@ extern inline int in_be16(const volatile unsigned short __iomem *addr) { int ret; - __asm__ __volatile__("lhz%U1%X1 %0,%1;\n" + __asm__ __volatile__("sync; lhz%U1%X1 %0,%1;\n" "twi 0,%0,0;\n" "isync" : "=r" (ret) : "m" (*addr)); return ret; @@ -97,20 +97,20 @@ extern inline int in_be16(const volatile unsigned short __iomem *addr) extern inline void out_le16(volatile unsigned short __iomem *addr, int val) { - __asm__ __volatile__("sthbrx %1,0,%2; eieio" : "=m" (*addr) : + __asm__ __volatile__("sync; sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); } extern inline void out_be16(volatile unsigned short __iomem *addr, int val) { - __asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val)); + __asm__ __volatile__("sync; sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); } extern inline unsigned in_le32(const volatile unsigned __iomem *addr) { unsigned ret; - __asm__ __volatile__("lwbrx %0,0,%1;\n" + __asm__ __volatile__("sync; lwbrx %0,0,%1;\n" "twi 0,%0,0;\n" "isync" : "=r" (ret) : "r" (addr), "m" (*addr)); @@ -121,7 +121,7 @@ extern inline unsigned in_be32(const volatile unsigned __iomem *addr) { unsigned ret; - __asm__ __volatile__("lwz%U1%X1 %0,%1;\n" + __asm__ __volatile__("sync; lwz%U1%X1 %0,%1;\n" "twi 0,%0,0;\n" "isync" : "=r" (ret) : "m" (*addr)); return ret; @@ -129,13 +129,13 @@ extern inline unsigned in_be32(const volatile unsigned __iomem *addr) extern inline void out_le32(volatile unsigned __iomem *addr, int val) { - __asm__ __volatile__("stwbrx %1,0,%2; eieio" : "=m" (*addr) : + __asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); } extern inline void out_be32(volatile unsigned __iomem *addr, int val) { - __asm__ __volatile__("stw%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val)); + __asm__ __volatile__("sync; stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val)); } #if defined (CONFIG_8260_PCI9) #define readb(addr) in_8((volatile u8 *)(addr)) @@ -259,6 +259,7 @@ extern __inline__ unsigned int name(unsigned int port) \ { \ unsigned int 
x; \ __asm__ __volatile__( \ + "sync\n" \ "0:" op " %0,0,%1\n" \ "1: twi 0,%0,0\n" \ "2: isync\n" \ @@ -284,6 +285,7 @@ extern __inline__ unsigned int name(unsigned int port) \ extern __inline__ void name(unsigned int val, unsigned int port) \ { \ __asm__ __volatile__( \ + "sync\n" \ "0:" op " %0,0,%1\n" \ "1: sync\n" \ "2:\n" \ diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index 7f1ef99fd1e..c00dd2b3dc5 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h @@ -10,7 +10,6 @@ #define DEBUG_H #include <linux/fs.h> -#include <linux/string.h> /* Note: * struct __debug_entry must be defined outside of #ifdef __KERNEL__ @@ -35,6 +34,7 @@ struct __debug_entry{ #define __DEBUG_FEATURE_VERSION 2 /* version of debug feature */ #ifdef __KERNEL__ +#include <linux/string.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/time.h> diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h index 710646e64f7..c0d629d61d3 100644 --- a/include/asm-s390/elf.h +++ b/include/asm-s390/elf.h @@ -93,19 +93,6 @@ #define R_390_NUM 61 /* - * ELF register definitions.. - */ - -#include <linux/sched.h> /* for task_struct */ -#include <asm/ptrace.h> -#include <asm/user.h> -#include <asm/system.h> /* for save_access_regs */ - - -typedef s390_fp_regs elf_fpregset_t; -typedef s390_regs elf_gregset_t; - -/* * These are used to set parameters in the core dumps. */ #ifndef __s390x__ @@ -117,6 +104,20 @@ typedef s390_regs elf_gregset_t; #define ELF_ARCH EM_S390 /* + * ELF register definitions.. + */ + +#include <asm/ptrace.h> +#include <asm/user.h> + +typedef s390_fp_regs elf_fpregset_t; +typedef s390_regs elf_gregset_t; + +#ifdef __KERNEL__ +#include <linux/sched.h> /* for task_struct */ +#include <asm/system.h> /* for save_access_regs */ + +/* * This is used to ensure we don't load something for the wrong architecture. */ #define elf_check_arch(x) \ @@ -198,7 +199,6 @@ static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs) #define ELF_PLATFORM (NULL) -#ifdef __KERNEL__ #ifndef __s390x__ #define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) #else /* __s390x__ */ diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h index b4f8f4a41a6..a406fcb1e92 100644 --- a/include/asm-x86_64/elf.h +++ b/include/asm-x86_64/elf.h @@ -7,8 +7,6 @@ #include <asm/ptrace.h> #include <asm/user.h> -#include <asm/processor.h> -#include <asm/compat.h> /* x86-64 relocation types */ #define R_X86_64_NONE 0 /* No reloc */ @@ -39,18 +37,23 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_i387_struct elf_fpregset_t; /* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) \ - ((x)->e_machine == EM_X86_64) - -/* * These are used to set parameters in the core dumps. */ #define ELF_CLASS ELFCLASS64 #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_X86_64 +#ifdef __KERNEL__ +#include <asm/processor.h> +#include <asm/compat.h> + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) \ + ((x)->e_machine == EM_X86_64) + + /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx contains a pointer to a function which might be registered using `atexit'. 
This provides a mean for the dynamic linker to call DT_FINI functions for @@ -141,7 +144,6 @@ typedef struct user_i387_struct elf_fpregset_t; /* I'm not sure if we can use '-' here */ #define ELF_PLATFORM ("x86_64") -#ifdef __KERNEL__ extern void set_personality_64bit(void); #define SET_PERSONALITY(ex, ibcs2) set_personality_64bit() /* diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h index cef7a7d51b7..3ede2a61973 100644 --- a/include/asm-x86_64/signal.h +++ b/include/asm-x86_64/signal.h @@ -3,13 +3,13 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> -#include <linux/linkage.h> #include <linux/time.h> /* Avoid too many header ordering problems. */ struct siginfo; #ifdef __KERNEL__ +#include <linux/linkage.h> /* Most things should be clean enough to redefine this at will, if care is taken to make libc match. */ diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 2d89d309a2a..80fd48e84bb 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -620,6 +620,8 @@ __SYSCALL(__NR_vmsplice, sys_vmsplice) #define __NR_move_pages 279 __SYSCALL(__NR_move_pages, sys_move_pages) +#ifdef __KERNEL__ + #define __NR_syscall_max __NR_move_pages #ifndef __NO_STUBS @@ -744,8 +746,6 @@ __syscall_return(type,__res); \ #else /* __KERNEL_SYSCALLS__ */ -#ifdef __KERNEL__ - #include <linux/syscalls.h> #include <asm/ptrace.h> @@ -821,8 +821,6 @@ asmlinkage long sys_fork(struct pt_regs regs); asmlinkage long sys_vfork(struct pt_regs regs); asmlinkage long sys_pipe(int *fildes); -#endif /* __KERNEL_SYSCALLS__ */ - #ifndef __ASSEMBLY__ #include <linux/linkage.h> @@ -838,9 +836,9 @@ asmlinkage long sys_rt_sigaction(int sig, struct sigaction __user *oact, size_t sigsetsize); -#endif +#endif /* __ASSEMBLY__ */ -#endif +#endif /* __KERNEL_SYSCALLS__ */ /* * "Conditional" syscalls @@ -850,6 +848,8 @@ asmlinkage long sys_rt_sigaction(int sig, */ #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") -#endif +#endif /* __NO_STUBS */ -#endif +#endif /* __KERNEL__ */ + +#endif /* _ASM_X86_64_UNISTD_H_ */ diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index a85e16f56d7..146b24402a5 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h @@ -1,8 +1,6 @@ #ifndef _ASM_X86_64_VSYSCALL_H_ #define _ASM_X86_64_VSYSCALL_H_ -#include <linux/seqlock.h> - enum vsyscall_num { __NR_vgettimeofday, __NR_vtime, @@ -14,6 +12,7 @@ enum vsyscall_num { #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) #ifdef __KERNEL__ +#include <linux/seqlock.h> #define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16))) #define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16))) diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 530b1e6173b..6c2066caeaa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -9,27 +9,6 @@ #ifndef _LINUX_NFS_FS_H #define _LINUX_NFS_FS_H -#include <linux/in.h> -#include <linux/mm.h> -#include <linux/pagemap.h> -#include <linux/rwsem.h> -#include <linux/wait.h> - -#include <linux/sunrpc/debug.h> -#include <linux/sunrpc/auth.h> -#include <linux/sunrpc/clnt.h> - -#include <linux/nfs.h> -#include <linux/nfs2.h> -#include <linux/nfs3.h> -#include <linux/nfs4.h> -#include <linux/nfs_xdr.h> - -#include <linux/nfs_fs_sb.h> - -#include <linux/rwsem.h> -#include <linux/mempool.h> - /* * Enable debugging support for nfs client. * Requires RPC_DEBUG. 
@@ -48,11 +27,6 @@ #define NFS_SUPER_MAGIC 0x6969 /* - * These are the default flags for swap requests - */ -#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) - -/* * When flushing a cluster of dirty pages, there can be different * strategies: */ @@ -65,6 +39,32 @@ #ifdef __KERNEL__ +#include <linux/in.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/rwsem.h> +#include <linux/wait.h> + +#include <linux/sunrpc/debug.h> +#include <linux/sunrpc/auth.h> +#include <linux/sunrpc/clnt.h> + +#include <linux/nfs.h> +#include <linux/nfs2.h> +#include <linux/nfs3.h> +#include <linux/nfs4.h> +#include <linux/nfs_xdr.h> + +#include <linux/nfs_fs_sb.h> + +#include <linux/rwsem.h> +#include <linux/mempool.h> + +/* + * These are the default flags for swap requests + */ +#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) + /* * NFSv3/v4 Access mode cache entry */ diff --git a/include/linux/timex.h b/include/linux/timex.h index 19bb6538b49..d543d3871e3 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -57,7 +57,6 @@ #include <linux/time.h> #include <asm/param.h> -#include <asm/timex.h> /* * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen @@ -191,6 +190,8 @@ struct timex { #define TIME_BAD TIME_ERROR /* bw compat */ #ifdef __KERNEL__ +#include <asm/timex.h> + /* * kernel variables * Note: maximum error = NTP synch distance = dispersion + delay / 2; |
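
Most of the hunks above repeat one pattern: material that userspace must not see (kernel-internal includes, inline helpers, NR_syscalls, stack-size limits) moves inside #ifdef __KERNEL__, and the header is listed under unifdef-y in its Kbuild file so that `make headers_install` runs unifdef over it and strips the guarded block; header-y files are exported as-is, which is why asm-i386/setup.h moves from header-y to unifdef-y once it gains a guard. The sketch below is illustrative only: the file name and identifiers are invented, not taken from this diff, and it simply shows which side of the guard each kind of content belongs on.

```c
/* example.h -- hypothetical exported header, showing the layout this
 * merge converges on; names here are made up for illustration only.
 */
#ifndef _EXAMPLE_H
#define _EXAMPLE_H

#include <linux/types.h>		/* __u32/__u64 are part of the exported ABI */

#define EXAMPLE_MAGIC	0x45		/* userspace-visible constant */

struct example_req {			/* structure shared with userspace */
	__u32 flags;
	__u64 addr;
};

#ifdef __KERNEL__			/* stripped by unifdef on headers_install */
#include <linux/spinlock.h>		/* kernel-internal headers stay inside */

#define EXAMPLE_MAX_PENDING	32	/* implementation detail, not ABI */

struct example_state {			/* kernel-only bookkeeping */
	spinlock_t	lock;
	unsigned long	pending;
};
#endif /* __KERNEL__ */

#endif /* _EXAMPLE_H */
```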
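
The asm-powerpc/eeh.h, io.h, paca.h and spinlock.h hunks (and the matching asm-ppc/io.h ones) form one scheme: every MMIO accessor now issues `sync` before the access so the access is ordered after earlier cacheable stores, every MMIO store sets a per-CPU io_sync flag in the paca, and spin_unlock() (through SYNC_IO) or an explicit mmiowb() issues a full `sync` only when that flag is set. The condensed sketch below just gathers those pieces for readability; it reuses the names and asm from the hunks above (get_paca(), mb() and unlikely() are the kernel's own) and covers the 64-bit SMP case only, so it is not a standalone, compilable file.

```c
/* ppc64 SMP, condensed from the hunks above -- a sketch, not standalone code. */

static inline void out_be32(volatile unsigned __iomem *addr, int val)
{
	/* "sync" before the store: the MMIO store cannot pass earlier
	 * cacheable stores (e.g. a DMA descriptor written just before). */
	__asm__ __volatile__("sync; stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
	get_paca()->io_sync = 1;	/* note that MMIO is outstanding */
}

static inline void mmiowb(void)
{
	/* push outstanding MMIO toward the device, then clear the flag */
	__asm__ __volatile__("sync" : : : "memory");
	get_paca()->io_sync = 0;
}

#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)	/* done on lock acquire */
#define SYNC_IO							\
	do {							\
		if (unlikely(get_paca()->io_sync)) {		\
			mb();		/* full "sync" */	\
			get_paca()->io_sync = 0;		\
		}						\
	} while (0)
/* __raw_spin_unlock() runs SYNC_IO before releasing the lock, so MMIO done
 * under a spinlock reaches the device before another CPU can take the same
 * lock and start its own MMIO: the common writel()-under-spin_lock() driver
 * pattern works without an explicit mmiowb(). */
```

The point of the flag is to keep unlock cheap: only critical sections that actually performed MMIO pay for the extra barrier when the lock is released.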
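
The asm-powerpc/futex.h hunk replaces the -ENOSYS stub for futex_atomic_cmpxchg_inatomic() with a real lwarx/stwcx. loop plus an exception-table fixup that returns -EFAULT if the user access faults. Its contract is a compare-and-swap that returns whatever value was found at the user address. The small C11 program below only illustrates that contract in userspace; it is not the kernel routine, and the helper name is reused purely for the analogy. A strong CAS is used so the returned value is the observed one on both the success and failure paths.

```c
/* Userspace illustration of the futex_atomic_cmpxchg_inatomic() contract.
 * Build: gcc -std=c11 -O2 cmpxchg_demo.c
 */
#include <stdatomic.h>
#include <stdio.h>

static int cmpxchg_inatomic(atomic_int *uaddr, int oldval, int newval)
{
	int prev = oldval;

	/* On success *uaddr becomes newval and prev stays oldval; on failure
	 * prev is overwritten with the value actually observed.  Either way
	 * the caller gets back "what was there", as the kernel helper does
	 * (minus the -EFAULT path, which has no userspace equivalent here). */
	atomic_compare_exchange_strong(uaddr, &prev, newval);
	return prev;
}

int main(void)
{
	atomic_int futex_word = 0;

	printf("%d\n", cmpxchg_inatomic(&futex_word, 0, 1)); /* 0: swap happened */
	printf("%d\n", cmpxchg_inatomic(&futex_word, 0, 2)); /* 1: no swap */
	printf("%d\n", atomic_load(&futex_word));            /* still 1 */
	return 0;
}
```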