Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/atomic.h       | 30
-rw-r--r--  include/asm-i386/futex.h        | 10
-rw-r--r--  include/asm-i386/local.h        | 14
-rw-r--r--  include/asm-i386/posix_types.h  |  4
-rw-r--r--  include/asm-i386/rwlock.h       |  4
-rw-r--r--  include/asm-i386/rwsem.h        | 35
-rw-r--r--  include/asm-i386/semaphore.h    |  8
-rw-r--r--  include/asm-i386/spinlock.h     | 14
-rw-r--r--  include/asm-i386/system.h       | 23
-rw-r--r--  include/asm-i386/thread_info.h  |  7
10 files changed, 74 insertions(+), 75 deletions(-)
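
The theme running through these hunks is a single inline-asm constraint fix: the old code listed the memory location once as a write-only output ("=m") and again as a plain input ("m") to tell GCC the asm both reads and writes it. That paired form technically permits GCC to pick different addresses for the two operands; the read-write constraint "+m" states the intent directly and drops the duplicate input. A minimal before/after sketch of the pattern (atomic_t and LOCK_PREFIX as in the kernel headers):

        /* old: write-only output plus the same location as a second input */
        __asm__ __volatile__(
                LOCK_PREFIX "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));

        /* new: one read-write operand carries both roles */
        __asm__ __volatile__(
                LOCK_PREFIX "incl %0"
                :"+m" (v->counter));
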
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4f061fa7379..51a16624252 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -46,8 +46,8 @@ static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"+m" (v->counter)
+ :"ir" (i));
}
/**
@@ -61,8 +61,8 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"+m" (v->counter)
+ :"ir" (i));
}
/**
@@ -80,8 +80,8 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
__asm__ __volatile__(
LOCK_PREFIX "subl %2,%0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ :"+m" (v->counter), "=qm" (c)
+ :"ir" (i) : "memory");
return c;
}
@@ -95,8 +95,7 @@ static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"+m" (v->counter));
}
/**
@@ -109,8 +108,7 @@ static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"+m" (v->counter));
}
/**
@@ -127,8 +125,8 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ :"+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
@@ -146,8 +144,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
__asm__ __volatile__(
LOCK_PREFIX "incl %0; sete %1"
- :"=m" (v->counter), "=qm" (c)
- :"m" (v->counter) : "memory");
+ :"+m" (v->counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
@@ -166,8 +164,8 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
__asm__ __volatile__(
LOCK_PREFIX "addl %2,%0; sets %1"
- :"=m" (v->counter), "=qm" (c)
- :"ir" (i), "m" (v->counter) : "memory");
+ :"+m" (v->counter), "=qm" (c)
+ :"ir" (i) : "memory");
return c;
}
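
The flag-returning variants (atomic_sub_and_test, atomic_dec_and_test, atomic_inc_and_test, atomic_add_negative) get the same conversion, with sete/sets capturing ZF or SF into a byte output. An illustrative sketch of the contract these helpers keep:

        atomic_t refs = ATOMIC_INIT(2);

        atomic_dec(&refs);                 /* 2 -> 1, no return value       */
        if (atomic_dec_and_test(&refs))    /* 1 -> 0, returns true at zero  */
                /* last reference gone: safe to release the object here */ ;
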
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 7b8ceefd010..946d97cfea2 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -20,8 +20,8 @@
.align 8\n\
.long 1b,3b\n\
.previous" \
- : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
- : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
@@ -38,9 +38,9 @@
.align 8\n\
.long 1b,4b,2b,4b\n\
.previous" \
- : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
+ : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
"=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -123,7 +123,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
" .long 1b,3b \n"
" .previous \n"
- : "=a" (oldval), "=m" (*uaddr)
+ : "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval)
: "memory"
);
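
The futex hunks keep the exception-table fixups intact; only the duplicate "m" (*uaddr) input goes away. futex_atomic_cmpxchg_inatomic() itself follows the usual cmpxchg contract on a user pointer — compare, conditionally store, and report what was found — roughly:

        int cur = futex_atomic_cmpxchg_inatomic(uaddr, expected, desired);
        if (cur == expected)
                /* *uaddr was atomically replaced with desired */ ;
        else
                /* another task got there first; cur is the value seen */ ;
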
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 3b4998c51d0..12060e22f7e 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -17,32 +17,30 @@ static __inline__ void local_inc(local_t *v)
{
__asm__ __volatile__(
"incl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"+m" (v->counter));
}
static __inline__ void local_dec(local_t *v)
{
__asm__ __volatile__(
"decl %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"+m" (v->counter));
}
static __inline__ void local_add(long i, local_t *v)
{
__asm__ __volatile__(
"addl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"+m" (v->counter)
+ :"ir" (i));
}
static __inline__ void local_sub(long i, local_t *v)
{
__asm__ __volatile__(
"subl %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"+m" (v->counter)
+ :"ir" (i));
}
/* On x86, these are no better than the atomic variants. */
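
The local_t operations are the atomic_t bodies minus LOCK_PREFIX, which is sound when the counter is only ever touched by its owning CPU. A hedged usage sketch, assuming the per-CPU helpers of this kernel generation (DEFINE_PER_CPU, __get_cpu_var):

        static DEFINE_PER_CPU(local_t, nr_events);

        static void count_event(void)
        {
                /* plain incl suffices: no cross-CPU access to this counter */
                local_inc(&__get_cpu_var(nr_events));
        }
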
diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h
index 4e47ed059ad..133e31e7dfd 100644
--- a/include/asm-i386/posix_types.h
+++ b/include/asm-i386/posix_types.h
@@ -51,12 +51,12 @@ typedef struct {
#undef __FD_SET
#define __FD_SET(fd,fdsetp) \
__asm__ __volatile__("btsl %1,%0": \
- "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+ "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
#undef __FD_CLR
#define __FD_CLR(fd,fdsetp) \
__asm__ __volatile__("btrl %1,%0": \
- "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
+ "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
#undef __FD_ISSET
#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 94f00195d54..96b0bef2ea5 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -37,7 +37,7 @@
"popl %%eax\n\t" \
"1:\n", \
"subl $1,%0\n\t", \
- "=m" (*(volatile int *)rw) : : "memory")
+ "+m" (*(volatile int *)rw) : : "memory")
#define __build_read_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
@@ -63,7 +63,7 @@
"popl %%eax\n\t" \
"1:\n", \
"subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
- "=m" (*(volatile int *)rw) : : "memory")
+ "+m" (*(volatile int *)rw) : : "memory")
#define __build_write_lock(rw, helper) do { \
if (__builtin_constant_p(rw)) \
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 2f07601562e..43113f5608e 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -111,8 +111,8 @@ LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value
" jmp 1b\n"
LOCK_SECTION_END
"# ending down_read\n\t"
- : "=m"(sem->count)
- : "a"(sem), "m"(sem->count)
+ : "+m" (sem->count)
+ : "a" (sem)
: "memory", "cc");
}
@@ -133,8 +133,8 @@ LOCK_PREFIX " cmpxchgl %2,%0\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
- : "+m"(sem->count), "=&a"(result), "=&r"(tmp)
- : "i"(RWSEM_ACTIVE_READ_BIAS)
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "i" (RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
return result>=0 ? 1 : 0;
}
@@ -161,8 +161,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the
" jmp 1b\n"
LOCK_SECTION_END
"# ending down_write"
- : "=m"(sem->count), "=d"(tmp)
- : "a"(sem), "1"(tmp), "m"(sem->count)
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
: "memory", "cc");
}
@@ -205,8 +205,8 @@ LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old valu
" jmp 1b\n"
LOCK_SECTION_END
"# ending __up_read\n"
- : "=m"(sem->count), "=d"(tmp)
- : "a"(sem), "1"(tmp), "m"(sem->count)
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (tmp)
: "memory", "cc");
}
@@ -231,8 +231,8 @@ LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 ->
" jmp 1b\n"
LOCK_SECTION_END
"# ending __up_write\n"
- : "=m"(sem->count)
- : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc", "edx");
}
@@ -256,8 +256,8 @@ LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001
" jmp 1b\n"
LOCK_SECTION_END
"# ending __downgrade_write\n"
- : "=m"(sem->count)
- : "a"(sem), "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
+ : "+m" (sem->count)
+ : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}
@@ -268,8 +268,8 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
__asm__ __volatile__(
LOCK_PREFIX "addl %1,%0"
- : "=m"(sem->count)
- : "ir"(delta), "m"(sem->count));
+ : "+m" (sem->count)
+ : "ir" (delta));
}
/*
@@ -280,10 +280,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
int tmp = delta;
__asm__ __volatile__(
-LOCK_PREFIX "xadd %0,(%2)"
- : "+r"(tmp), "=m"(sem->count)
- : "r"(sem), "m"(sem->count)
- : "memory");
+LOCK_PREFIX "xadd %0,%1"
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");
return tmp+delta;
}
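
The last rwsem hunk also switches the xadd from a register-indirect "(%2)" form to a true memory operand, letting GCC choose the addressing mode. xadd stores the sum while handing back the old value in the register operand, which is why tmp+delta is the updated count. A hedged standalone sketch of that contract:

        /* returns the post-update value, like rwsem_atomic_update() */
        static inline int fetch_add_return(int delta, int *p)
        {
                int tmp = delta;
                __asm__ __volatile__(
                        "lock; xaddl %0,%1"   /* %0 <- old *p; *p <- old + delta */
                        : "+r" (tmp), "+m" (*p)
                        : : "memory");
                return tmp + delta;
        }
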
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index f7a0f310c52..d51e800acf2 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -107,7 +107,7 @@ static inline void down(struct semaphore * sem)
"call __down_failed\n\t"
"jmp 1b\n"
LOCK_SECTION_END
- :"=m" (sem->count)
+ :"+m" (sem->count)
:
:"memory","ax");
}
@@ -132,7 +132,7 @@ static inline int down_interruptible(struct semaphore * sem)
"call __down_failed_interruptible\n\t"
"jmp 1b\n"
LOCK_SECTION_END
- :"=a" (result), "=m" (sem->count)
+ :"=a" (result), "+m" (sem->count)
:
:"memory");
return result;
@@ -157,7 +157,7 @@ static inline int down_trylock(struct semaphore * sem)
"call __down_failed_trylock\n\t"
"jmp 1b\n"
LOCK_SECTION_END
- :"=a" (result), "=m" (sem->count)
+ :"=a" (result), "+m" (sem->count)
:
:"memory");
return result;
@@ -182,7 +182,7 @@ static inline void up(struct semaphore * sem)
"jmp 1b\n"
LOCK_SECTION_END
".subsection 0\n"
- :"=m" (sem->count)
+ :"+m" (sem->count)
:
:"memory","ax");
}
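
The semaphore fast paths only change their count constraint; the out-of-line slow-path calls (__down_failed and friends) and the "ax" clobber stay as they were. Typical usage, for reference:

        if (down_interruptible(&sem))
                return -EINTR;          /* interrupted while sleeping */
        /* ... critical section ... */
        up(&sem);
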
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 87c40f83065..d816c62a7a1 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -65,7 +65,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
alternative_smp(
__raw_spin_lock_string,
__raw_spin_lock_string_up,
- "=m" (lock->slock) : : "memory");
+ "+m" (lock->slock) : : "memory");
}
/*
@@ -79,7 +79,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
alternative_smp(
__raw_spin_lock_string_flags,
__raw_spin_lock_string_up,
- "=m" (lock->slock) : "r" (flags) : "memory");
+ "+m" (lock->slock) : "r" (flags) : "memory");
}
#endif
@@ -88,7 +88,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
char oldval;
__asm__ __volatile__(
"xchgb %b0,%1"
- :"=q" (oldval), "=m" (lock->slock)
+ :"=q" (oldval), "+m" (lock->slock)
:"0" (0) : "memory");
return oldval > 0;
}
@@ -104,7 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#define __raw_spin_unlock_string \
"movb $1,%0" \
- :"=m" (lock->slock) : : "memory"
+ :"+m" (lock->slock) : : "memory"
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
#define __raw_spin_unlock_string \
"xchgb %b0, %1" \
- :"=q" (oldval), "=m" (lock->slock) \
+ :"=q" (oldval), "+m" (lock->slock) \
:"0" (oldval) : "memory"
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -199,13 +199,13 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
- : "=m" (rw->lock) : : "memory");
+ : "+m" (rw->lock) : : "memory");
}
#endif /* __ASM_SPINLOCK_H */
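
__raw_spin_trylock() leans on xchg being implicitly locked on x86: one instruction writes 0 into the lock byte and returns whatever was there before. A hedged sketch of the idea, using this spinlock's layout (positive = unlocked):

        static inline int try_lock_byte(volatile signed char *slock)
        {
                signed char old;
                __asm__ __volatile__(
                        "xchgb %b0,%1"          /* atomic without LOCK_PREFIX */
                        :"=q" (old), "+m" (*slock)
                        :"0" (0) : "memory");
                return old > 0;                 /* acquired iff it was unlocked */
        }
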
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index db398d88b1d..49928eb33f8 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -82,10 +82,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
@@ -94,7 +90,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
__dummy; \
})
#define write_cr0(x) \
- __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
#define read_cr2() ({ \
unsigned int __dummy; \
@@ -104,7 +100,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
__dummy; \
})
#define write_cr2(x) \
- __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
#define read_cr3() ({ \
unsigned int __dummy; \
@@ -114,7 +110,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
__dummy; \
})
#define write_cr3(x) \
- __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr3": :"r" (x))
#define read_cr4() ({ \
unsigned int __dummy; \
@@ -123,7 +119,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
-
#define read_cr4_safe() ({ \
unsigned int __dummy; \
/* This could fault if %cr4 does not exist */ \
@@ -135,15 +130,19 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
: "=r" (__dummy): "0" (0)); \
__dummy; \
})
-
#define write_cr4(x) \
- __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
#define stts() write_cr0(8 | read_cr0())
#endif /* __KERNEL__ */
#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
+ __asm__ __volatile__ ("wbinvd": : :"memory")
static inline unsigned long get_limit(unsigned long segment)
{
@@ -454,8 +453,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
#include <linux/irqflags.h>
/*
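
Beyond the constraint conversions, the system.h hunks drop the trailing semicolons that several macros carried. A statement-like macro must not end in ';', or its expansion leaves a stray empty statement that breaks if/else chains — illustratively (need_fpu and cr0 are placeholder variables):

        if (need_fpu)
                write_cr0(cr0 & ~8);    /* old macro expanded to "asm(...);;" -- */
        else                            /* the extra ';' ended the if body, so   */
                stts();                 /* this 'else' failed to parse           */
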
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 2833fa2c0dd..54d6d7aea93 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -140,6 +140,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SECCOMP 8 /* secure computing */
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 16
+#define TIF_DEBUG 17 /* uses debug registers */
+#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -151,6 +153,8 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
+#define _TIF_DEBUG (1<<TIF_DEBUG)
+#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -159,6 +163,9 @@ static inline struct thread_info *current_thread_info(void)
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
+/* flags to check in __switch_to() */
+#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
+
/*
* Thread-synchronous status.
*
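
The new _TIF_WORK_CTXSW mask collects the flags the context-switch path has to care about, so __switch_to() can test one word and skip the debug-register and I/O-bitmap handling entirely in the common case. A hedged sketch of the kind of check this enables (the slow-path helper name is illustrative):

        /* in __switch_to(): fast path unless either task needs extra work */
        if (unlikely((task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW) ||
                     (task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)))
                handle_ctxsw_extras(prev_p, next_p);    /* illustrative helper */
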