Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/futex.h     | 88
-rw-r--r--  include/asm-sparc64/mman.h      | 29
-rw-r--r--  include/asm-sparc64/smp.h       |  6
-rw-r--r--  include/asm-sparc64/spinlock.h  | 24
-rw-r--r--  include/asm-sparc64/uaccess.h   | 12
-rw-r--r--  include/asm-sparc64/unistd.h    |  9
6 files changed, 126 insertions(+), 42 deletions(-)
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 6a332a9f099..34c4b43d3f9 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,6 +1,86 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _SPARC64_FUTEX_H
+#define _SPARC64_FUTEX_H
-#include <asm-generic/futex.h>
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
-#endif
+#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile__( \
+ "\n1: lduwa [%3] %%asi, %2\n" \
+ " " insn "\n" \
+ "2: casa [%3] %%asi, %2, %1\n" \
+ " cmp %2, %1\n" \
+ " bne,pn %%icc, 1b\n" \
+ " mov 0, %0\n" \
+ "3:\n" \
+ " .section .fixup,#alloc,#execinstr\n" \
+ " .align 4\n" \
+ "4: ba 3b\n" \
+ " mov %5, %0\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .word 1b, 4b\n" \
+ " .word 2b, 4b\n" \
+ " .previous\n" \
+ : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
+ : "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tem;
+
+ if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+ return -EFAULT;
+ if (unlikely((((unsigned long) uaddr) & 0x3UL)))
+ return -EINVAL;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ dec_preempt_count();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+#endif /* !(_SPARC64_FUTEX_H) */
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h
index cb4b6156194..6fd878e6143 100644
--- a/include/asm-sparc64/mman.h
+++ b/include/asm-sparc64/mman.h
@@ -2,21 +2,10 @@
#ifndef __SPARC64_MMAN_H__
#define __SPARC64_MMAN_H__
+#include <asm-generic/mman.h>
+
/* SunOS'ified... */
-#define PROT_READ 0x1 /* page can be read */
-#define PROT_WRITE 0x2 /* page can be written */
-#define PROT_EXEC 0x4 /* page can be executed */
-#define PROT_SEM 0x8 /* page may be used for atomic ops */
-#define PROT_NONE 0x0 /* page can not be accessed */
-#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
-#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
-
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_TYPE 0x0f /* Mask for type of mapping */
-#define MAP_FIXED 0x10 /* Interpret addr exactly */
-#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */
#define MAP_NORESERVE 0x40 /* don't reserve swap pages */
#define MAP_INHERIT 0x80 /* SunOS doesn't do this, but... */
@@ -27,10 +16,6 @@
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
-#define MS_ASYNC 1 /* sync memory asynchronously */
-#define MS_INVALIDATE 2 /* invalidate the caches */
-#define MS_SYNC 4 /* synchronous memory sync */
-
#define MCL_CURRENT 0x2000 /* lock all currently mapped pages */
#define MCL_FUTURE 0x4000 /* lock all additions to address space */
@@ -48,16 +33,6 @@
#define MC_LOCKAS 5 /* Lock an entire address space of the calling process */
#define MC_UNLOCKAS 6 /* Unlock entire address space of calling process */
-#define MADV_NORMAL 0x0 /* default page-in behavior */
-#define MADV_RANDOM 0x1 /* page-in minimum required */
-#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
-#define MADV_WILLNEED 0x3 /* pre-fault pages */
-#define MADV_DONTNEED 0x4 /* discard these pages */
#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
-#define MADV_REMOVE 0x6 /* remove these pages & resources */
-
-/* compatibility flags */
-#define MAP_ANON MAP_ANONYMOUS
-#define MAP_FILE 0
#endif /* __SPARC64_MMAN_H__ */
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 110a2de8912..473edb2603e 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -66,8 +66,14 @@ static __inline__ int hard_smp_processor_id(void)
#define raw_smp_processor_id() (current_thread_info()->cpu)
+extern void smp_setup_cpu_possible_map(void);
+
#endif /* !(__ASSEMBLY__) */
+#else
+
+#define smp_setup_cpu_possible_map() do { } while (0)
+
#endif /* !(CONFIG_SMP) */
#define NO_PROC_ID 0xFF
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index ec85d12d73b..508c416e9d6 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -131,6 +131,28 @@ static void inline __read_lock(raw_rwlock_t *lock)
: "memory");
}
+static int inline __read_trylock(raw_rwlock_t *lock)
+{
+ int tmp1, tmp2;
+
+ __asm__ __volatile__ (
+"1: ldsw [%2], %0\n"
+" brlz,a,pn %0, 2f\n"
+" mov 0, %0\n"
+" add %0, 1, %1\n"
+" cas [%2], %0, %1\n"
+" cmp %0, %1\n"
+" membar #StoreLoad | #StoreStore\n"
+" bne,pn %%icc, 1b\n"
+" mov 1, %0\n"
+"2:"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+ : "memory");
+
+ return tmp1;
+}
+
static void inline __read_unlock(raw_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -211,12 +233,12 @@ static int inline __write_trylock(raw_rwlock_t *lock)
}
#define __raw_read_lock(p) __read_lock(p)
+#define __raw_read_trylock(p) __read_trylock(p)
#define __raw_read_unlock(p) __read_unlock(p)
#define __raw_write_lock(p) __write_lock(p)
#define __raw_write_unlock(p) __write_unlock(p)
#define __raw_write_trylock(p) __write_trylock(p)
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw) (!(rw)->lock)
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 203e8eee635..c91d1e38eac 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -136,7 +136,7 @@ __asm__ __volatile__( \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
@@ -148,7 +148,7 @@ if (__builtin_constant_p(ret) && ret == -EFAULT) \
__asm__ __volatile__( \
"/* Put user asm ret, inline. */\n" \
"1:\t" "st"#size "a %1, [%2] %%asi\n\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, __ret_efault\n\n\t" \
".previous\n\n\t" \
@@ -163,7 +163,7 @@ __asm__ __volatile__( \
"ret\n\t" \
" restore %%g0, %3, %%o0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\n\t" \
@@ -206,7 +206,7 @@ __asm__ __volatile__( \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
@@ -218,7 +218,7 @@ if (__builtin_constant_p(retval) && retval == -EFAULT) \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b,__ret_efault\n\n\t" \
".previous\n\t" \
@@ -233,7 +233,7 @@ __asm__ __volatile__( \
"ret\n\t" \
" restore %%g0, %2, %%o0\n\n\t" \
".previous\n\t" \
- ".section __ex_table,#alloc\n\t" \
+ ".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 84ac2bdb090..a284986b154 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -307,7 +307,7 @@
#define __NR_mknodat 286
#define __NR_fchownat 287
#define __NR_futimesat 288
-#define __NR_newfstatat 289
+#define __NR_fstatat64 289
#define __NR_unlinkat 290
#define __NR_renameat 291
#define __NR_linkat 292
@@ -317,11 +317,12 @@
#define __NR_faccessat 296
#define __NR_pselect6 297
#define __NR_ppoll 298
+#define __NR_unshare 299
-/* WARNING: You MAY NOT add syscall numbers larger than 298, since
+/* WARNING: You MAY NOT add syscall numbers larger than 299, since
* all of the syscall tables in the Sparc kernel are
- * sized to have 298 entries (starting at zero). Therefore
- * find a free slot in the 0-298 range.
+ * sized to have 299 entries (starting at zero). Therefore
+ * find a free slot in the 0-299 range.
*/
#define _syscall0(type,name) \