Diffstat (limited to 'include')
-rw-r--r--  include/asm-s390/futex.h   |  87
-rw-r--r--  include/asm-s390/uaccess.h | 171
2 files changed, 63 insertions(+), 195 deletions(-)
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index ffedf14f89f..5e261e1de67 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -7,75 +7,21 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-#ifndef __s390x__
-#define __futex_atomic_fixup \
-	".section __ex_table,\"a\"\n"				\
-	"   .align 4\n"						\
-	"   .long  0b,4b,2b,4b,3b,4b\n"				\
-	".previous"
-#else /* __s390x__ */
-#define __futex_atomic_fixup \
-	".section __ex_table,\"a\"\n"				\
-	"   .align 8\n"						\
-	"   .quad  0b,4b,2b,4b,3b,4b\n"				\
-	".previous"
-#endif /* __s390x__ */
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
-	asm volatile("   sacf 256\n"				\
-		     "0: l   %1,0(%6)\n"			\
-		     "1: " insn					\
-		     "2: cs  %1,%2,0(%6)\n"			\
-		     "3: jl  1b\n"				\
-		     "   lhi %0,0\n"				\
-		     "4: sacf 0\n"				\
-		     __futex_atomic_fixup			\
-		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
-		       "=m" (*uaddr)				\
-		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),	\
-		       "m" (*uaddr) : "cc" );
-
 static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, newval, ret;
+	int oldval, ret;
+
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
-
-	switch (op) {
-	case FUTEX_OP_SET:
-		__futex_atomic_op("lr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ADD:
-		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_OR:
-		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ANDN:
-		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_XOR:
-		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	default:
-		ret = -ENOSYS;
-	}
-
-	dec_preempt_count();
+	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
 
 	if (!ret) {
 		switch (cmp) {
@@ -91,32 +37,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
+						int oldval, int newval)
 {
-	int ret;
-
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
-	asm volatile("   sacf 256\n"
-		     "   cs   %1,%4,0(%5)\n"
-		     "0: lr   %0,%1\n"
-		     "1: sacf 0\n"
-#ifndef __s390x__
-		     ".section __ex_table,\"a\"\n"
-		     "   .align 4\n"
-		     "   .long  0b,1b\n"
-		     ".previous"
-#else /* __s390x__ */
-		     ".section __ex_table,\"a\"\n"
-		     "   .align 8\n"
-		     "   .quad  0b,1b\n"
-		     ".previous"
-#endif /* __s390x__ */
-		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
-		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
-		     : "cc", "memory" );
-	return oldval;
+
+	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 0b7c0ca4c3d..39a2716ae18 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -47,7 +47,7 @@
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
 	asm volatile ("lctlg 7,7,%0" : : "m" (__pto) );			\
 })
-#else
+#else /* __s390x__ */
 #define set_fs(x) \
 ({								\
 	unsigned long __pto;					\
@@ -56,7 +56,7 @@
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
 	asm volatile ("lctl  7,7,%0" : : "m" (__pto) );			\
 })
-#endif
+#endif /* __s390x__ */
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
@@ -85,76 +85,50 @@ struct exception_table_entry
         unsigned long insn, fixup;
 };
 
-#ifndef __s390x__
-#define __uaccess_fixup \
-	".section .fixup,\"ax\"\n"	\
-	"2: lhi    %0,%4\n"		\
-	"   bras   1,3f\n"		\
-	"   .long  1b\n"		\
-	"3: l      1,0(1)\n"		\
-	"   br     1\n"			\
-	".previous\n"			\
-	".section __ex_table,\"a\"\n"	\
-	"   .align 4\n"			\
-	"   .long  0b,2b\n"		\
-	".previous"
-#define __uaccess_clobber "cc", "1"
-#else /* __s390x__ */
-#define __uaccess_fixup \
-	".section .fixup,\"ax\"\n"	\
-	"2: lghi   %0,%4\n"		\
-	"   jg     1b\n"		\
-	".previous\n"			\
-	".section __ex_table,\"a\"\n"	\
-	"   .align 8\n"			\
-	"   .quad  0b,2b\n"		\
-	".previous"
-#define __uaccess_clobber "cc"
-#endif /* __s390x__ */
+struct uaccess_ops {
+	size_t (*copy_from_user)(size_t, const void __user *, void *);
+	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
+	size_t (*copy_to_user)(size_t, void __user *, const void *);
+	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
+	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
+	size_t (*clear_user)(size_t, void __user *);
+	size_t (*strnlen_user)(size_t, const char __user *);
+	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
+	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+};
+
+extern struct uaccess_ops uaccess;
+extern struct uaccess_ops uaccess_std;
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+	size = uaccess.copy_to_user_small(size, ptr, x);
+	return size ? -EFAULT : size;
+}
+
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+	size = uaccess.copy_from_user_small(size, ptr, x);
+	return size ? -EFAULT : size;
+}
 
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
 */
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define __put_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile( \
-		"0: mvcs  0(%1,%2),%3,%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \
-		  "K" (-EFAULT) \
-		: __uaccess_clobber ); \
-})
-#else
-#define __put_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile( \
-		"0: mvcs  0(%1,%2),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \
-		  "K" (-EFAULT), "m" (x) \
-		: __uaccess_clobber ); \
-})
-#endif
-
 #define __put_user(x, ptr) \
 ({								\
 	__typeof__(*(ptr)) __x = (x);				\
-	int __pu_err;						\
+	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
 	switch (sizeof (*(ptr))) {				\
 	case 1:							\
 	case 2:							\
 	case 4:							\
 	case 8:							\
-		__put_user_asm(__x, ptr, __pu_err);		\
+		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
+					 ptr, &__x);		\
 		break;						\
 	default:						\
 		__put_user_bad();				\
@@ -172,60 +146,36 @@ struct exception_table_entry
 
 extern int __put_user_bad(void) __attribute__((noreturn));
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define __get_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile ( \
-		"0: mvcp  %O1(%2,%R1),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err), "=Q" (x) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), \
-		  "K" (-EFAULT) \
-		: __uaccess_clobber ); \
-})
-#else
-#define __get_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile ( \
-		"0: mvcp  0(%2,%5),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err), "=m" (x) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), \
-		  "K" (-EFAULT), "a" (&(x)) \
-		: __uaccess_clobber ); \
-})
-#endif
-
 #define __get_user(x, ptr)					\
 ({								\
-	int __gu_err;						\
-	__chk_user_ptr(ptr);					\
+	int __gu_err = -EFAULT;					\
+	__chk_user_ptr(ptr);					\
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
 		unsigned char __x;				\
-		__get_user_asm(__x, ptr, __gu_err);		\
+		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
 		unsigned short __x;				\
-		__get_user_asm(__x, ptr, __gu_err);		\
+		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
 		unsigned int __x;				\
-		__get_user_asm(__x, ptr, __gu_err);		\
+		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
 		unsigned long long __x;				\
-		__get_user_asm(__x, ptr, __gu_err);		\
+		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
+					 ptr, &__x);		\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
@@ -247,8 +197,6 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
-extern long __copy_to_user_asm(const void *from, long n, void __user *to);
-
 /**
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to:   Destination address, in user space.
@@ -266,7 +214,10 @@ extern long __copy_to_user_asm(const void *from, long n, void __user *to);
 static inline unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	return __copy_to_user_asm(from, n, to);
+	if (__builtin_constant_p(n) && (n <= 256))
+		return uaccess.copy_to_user_small(n, to, from);
+	else
+		return uaccess.copy_to_user(n, to, from);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -294,8 +245,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
-extern long __copy_from_user_asm(void *to, long n, const void __user *from);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to:   Destination address, in kernel space.
@@ -316,7 +265,10 @@ extern long __copy_from_user_asm(void *to, long n, const void __user *from);
 static inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_from_user_asm(to, n, from);
+	if (__builtin_constant_p(n) && (n <= 256))
+		return uaccess.copy_from_user_small(n, from, to);
+	else
+		return uaccess.copy_from_user(n, from, to);
 }
 
 /**
@@ -346,13 +298,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-extern unsigned long __copy_in_user_asm(const void __user *from, long n,
-					void __user *to);
-
 static inline unsigned long
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	return __copy_in_user_asm(from, n, to);
+	return uaccess.copy_in_user(n, to, from);
 }
 
 static inline unsigned long
@@ -360,34 +309,28 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
 	if (__access_ok(from,n) && __access_ok(to,n))
-		n = __copy_in_user_asm(from, n, to);
+		n = __copy_in_user(to, from, n);
 	return n;
 }
 
 /*
  * Copy a null terminated string from userspace.
 */
 
-extern long __strncpy_from_user_asm(long count, char *dst,
-				    const char __user *src);
-
 static inline long
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	long res = -EFAULT;
 	might_sleep();
 	if (access_ok(VERIFY_READ, src, 1))
-		res = __strncpy_from_user_asm(count, dst, src);
+		res = uaccess.strncpy_from_user(count, src, dst);
 	return res;
 }
-
-extern long __strnlen_user_asm(long count, const char __user *src);
-
 static inline unsigned long
 strnlen_user(const char __user * src, unsigned long n)
 {
 	might_sleep();
-	return __strnlen_user_asm(n, src);
+	return uaccess.strnlen_user(n, src);
 }
 
 /**
@@ -410,12 +353,10 @@ strnlen_user(const char __user * src, unsigned long n)
  * Zero Userspace
 */
 
-extern long __clear_user_asm(void __user *to, long n);
-
 static inline unsigned long
 __clear_user(void __user *to, unsigned long n)
 {
-	return __clear_user_asm(to, n);
+	return uaccess.clear_user(n, to);
 }
 
 static inline unsigned long
@@ -423,7 +364,7 @@ clear_user(void __user *to, unsigned long n)
 {
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user_asm(to, n);
+		n = uaccess.clear_user(n, to);
 	return n;
 }
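
For readers unfamiliar with the pattern: the patch above replaces per-call inline assembly with calls through a table of function pointers (struct uaccess_ops), so an implementation such as the uaccess_std variant declared in the header can be chosen at run time, and small constant-sized copies are routed to the *_small entry points. The stand-alone user-space sketch below only illustrates that dispatch pattern under simplified assumptions; the demo_* names and the memcpy-based implementation are invented for the example and are not part of the kernel patch.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Table of copy primitives, modeled on struct uaccess_ops above.
 * Only two members are kept for brevity. */
struct demo_uaccess_ops {
	size_t (*copy_to_user)(size_t n, void *to, const void *from);
	size_t (*copy_to_user_small)(size_t n, void *to, const void *from);
};

/* A trivial "standard" implementation; the kernel would use special
 * copy instructions here.  Returns the number of bytes NOT copied. */
static size_t demo_copy(size_t n, void *to, const void *from)
{
	memcpy(to, from, n);
	return 0;
}

static struct demo_uaccess_ops demo_uaccess = {
	.copy_to_user = demo_copy,
	.copy_to_user_small = demo_copy,
};

/* Mirrors __copy_to_user() in the patch: small, constant-sized copies
 * go through the *_small entry point, everything else through the
 * general one. */
static inline size_t demo_copy_to_user(void *to, const void *from, size_t n)
{
	if (__builtin_constant_p(n) && n <= 256)
		return demo_uaccess.copy_to_user_small(n, to, from);
	return demo_uaccess.copy_to_user(n, to, from);
}

int main(void)
{
	char dst[16];
	size_t left = demo_copy_to_user(dst, "hello", 6);

	printf("uncopied bytes: %zu, dst: %s\n", left, dst);
	return 0;
}

The design choice mirrored here is that callers keep a single, stable API (__copy_to_user, __get_user, and so on) while the backing implementation can be swapped by pointing the uaccess table at a different ops structure, instead of recompiling inline assembly into every call site.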