author     Tony Luck <tony.luck@intel.com>   2006-03-21 08:22:17 -0800
committer  Tony Luck <tony.luck@intel.com>   2006-03-21 08:22:17 -0800
commit     ae02e964b669f19fb08d953032463ab7dc6f79eb (patch)
tree       e02ff418110b06c5b7cac8302f9930573f2f35b0 /include
parent     409761bb6a06bd61e2d8e27a1af534371d9537ed (diff)
parent     dcc1dd2366a7c355fd8b6543c52685b864a2044f (diff)
Pull icc-cleanup into release branch
Diffstat (limited to 'include')
-rw-r--r--  include/asm-ia64/intel_intrin.h  134
-rw-r--r--  include/asm-ia64/sn/rw_mmr.h      56
2 files changed, 22 insertions, 168 deletions
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a7122d85017..d069b6acddc 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
*
*/
-#include <asm/types.h>
-
-void __lfetch(int lfhint, void *y);
-void __lfetch_excl(int lfhint, void *y);
-void __lfetch_fault(int lfhint, void *y);
-void __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void __ldfs(const int whichFloatReg, void *src);
-void __ldfd(const int whichFloatReg, void *src);
-void __ldfe(const int whichFloatReg, void *src);
-void __ldf8(const int whichFloatReg, void *src);
-void __ldf_fill(const int whichFloatReg, void *src);
-void __stfs(void *dst, const int whichFloatReg);
-void __stfd(void *dst, const int whichFloatReg);
-void __stfe(void *dst, const int whichFloatReg);
-void __stf8(void *dst, const int whichFloatReg);
-void __stf_spill(void *dst, const int whichFloatReg);
-
-void __st1_rel(void *dst, const __s8 value);
-void __st2_rel(void *dst, const __s16 value);
-void __st4_rel(void *dst, const __s32 value);
-void __st8_rel(void *dst, const __s64 value);
-__u8 __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
- the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void __setReg(const int whichReg, __u64 value);
-
-void __mf(void);
-void __mfa(void);
-void __synci(void);
-void __itcd(__s64 pa);
-void __itci(__s64 pa);
-void __itrd(__s64 whichTransReg, __s64 pa);
-void __itri(__s64 whichTransReg, __s64 pa);
-void __ptce(__s64 va);
-void __ptcl(__s64 va, __s64 pagesz);
-void __ptcg(__s64 va, __s64 pagesz);
-void __ptcga(__s64 va, __s64 pagesz);
-void __ptri(__s64 va, __s64 pagesz);
-void __ptrd(__s64 va, __s64 pagesz);
-void __invala (void);
-void __invala_gr(const int whichGeneralReg /* 0-127 */ );
-void __invala_fr(const int whichFloatReg /* 0-127 */ );
-void __nop(const int);
-void __fc(__u64 *addr);
-void __sum(int mask);
-void __rum(int mask);
-void __ssm(int mask);
-void __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void __memory_barrier(void);
-
-void __isrlz(void);
-void __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>
#define ia64_barrier() __memory_barrier()
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
#define ia64_getreg __getReg
#define ia64_setreg __setReg
-#define ia64_hint(x)
+#define ia64_hint __hint
+#define ia64_hint_pause __hint_pause
-#define ia64_mux1_brcst 0
-#define ia64_mux1_mix 8
-#define ia64_mux1_shuf 9
-#define ia64_mux1_alt 10
-#define ia64_mux1_rev 11
+#define ia64_mux1_brcst _m64_mux1_brcst
+#define ia64_mux1_mix _m64_mux1_mix
+#define ia64_mux1_shuf _m64_mux1_shuf
+#define ia64_mux1_alt _m64_mux1_alt
+#define ia64_mux1_rev _m64_mux1_rev
-#define ia64_mux1 _m64_mux1
+#define ia64_mux1(x,v) _m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
#define ia64_popcnt _m64_popcnt
#define ia64_getf_exp __getf_exp
#define ia64_shrp _m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
#define ia64_stf8 __stf8
#define ia64_stf_spill __stf_spill
-#define ia64_mf __mf
+#define ia64_mf __mf
#define ia64_mfa __mfa
#define ia64_fetchadd4_acq __fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
/* Values for lfhint in __lfetch and __lfetch_fault */
-#define ia64_lfhint_none 0
-#define ia64_lfhint_nt1 1
-#define ia64_lfhint_nt2 2
-#define ia64_lfhint_nta 3
+#define ia64_lfhint_none __lfhint_none
+#define ia64_lfhint_nt1 __lfhint_nt1
+#define ia64_lfhint_nt2 __lfhint_nt2
+#define ia64_lfhint_nta __lfhint_nta
#define ia64_lfetch __lfetch
#define ia64_lfetch_excl __lfetch_excl
@@ -254,4 +152,6 @@ do { \
} \
} while (0)
+#define __builtin_trap() __break(0);
+
#endif /* _ASM_IA64_INTEL_INTRIN_H */
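
For orientation only, here is a minimal sketch of how kernel code consumes the compiler-neutral ia64_* wrappers that this header now maps onto icc's <ia64intrin.h> intrinsics. The helper and its counter are invented for illustration and are not part of the patch; it assumes inclusion goes through the usual <asm/intrinsics.h> umbrella header, which picks intel_intrin.h when building with icc and gcc_intrin.h otherwise.

/* Hedged sketch: sample_fetch_and_add() and its caller-supplied counter are made up. */
#include <asm/intrinsics.h>	/* selects intel_intrin.h under __INTEL_COMPILER */

static inline unsigned int
sample_fetch_and_add(unsigned int *counter)
{
	unsigned int old;

	/* fetchadd4.acq: atomically add 1 and return the old value */
	old = ia64_fetchadd4_acq(counter, 1);

	/* full memory fence; maps to __mf() under icc, asm("mf") under gcc */
	ia64_mf();

	return old;
}

Because callers go through the ia64_* names, the same source builds unchanged whichever compiler-specific header is pulled in.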
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
index f40fd1a5510..2d78f4c5a45 100644
--- a/include/asm-ia64/sn/rw_mmr.h
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_RW_MMR_H
#define _ASM_IA64_SN_RW_MMR_H
/*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file declares routines that access MMRs via uncached physical addresses.
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
*/
-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
- long val;
- asm volatile
- ("mov r2=psr;;"
- "rsm psr.i | psr.dt;;"
- "srlz.i;;"
- "ld8.acq %0=[%1];;"
- "mov psr.l=r2;;"
- "srlz.i;;"
- : "=r"(val)
- : "r"(mmr)
- : "r2");
- return val;
-}
-
-
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
- asm volatile
- ("mov r2=psr;;"
- "rsm psr.i | psr.dt;;"
- "srlz.i;;"
- "st8.rel [%0]=%1;;"
- "mov psr.l=r2;;"
- "srlz.i;;"
- :: "r"(mmr), "r"(val)
- : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
- asm volatile
- ("mov r2=psr;;"
- "rsm psr.i | psr.dt | psr.ic;;"
- "cmp.ne p9,p0=%2,r0;"
- "srlz.i;;"
- "st8.rel [%0]=%1;"
- "(p9) st8.rel [%2]=%3;;"
- "mov psr.l=r2;;"
- "srlz.i;;"
- :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
- : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
#endif /* _ASM_IA64_SN_RW_MMR_H */
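
As a usage sketch only (the MMR addresses below are invented; the real callers live under arch/ia64/sn), the now out-of-line helpers are called exactly as the removed inline versions were:

#include <asm/sn/rw_mmr.h>

/* Hypothetical uncached physical MMR addresses, purely illustrative. */
#define SAMPLE_MMR1_PA	((volatile long *)0x800000000f000120UL)
#define SAMPLE_MMR2_PA	((volatile long *)0x800000000f000128UL)

static void sample_mmr_update(long set_bits)
{
	/* ld8.acq performed with psr.i/psr.dt cleared inside the helper */
	long val = pio_phys_read_mmr(SAMPLE_MMR1_PA);

	/* st8.rel under the same PSR manipulation */
	pio_phys_write_mmr(SAMPLE_MMR1_PA, val | set_bits);

	/* write up to two MMRs with psr.ic also off; a NULL mmr2 skips the second store */
	pio_atomic_phys_write_mmrs(SAMPLE_MMR1_PA, val, SAMPLE_MMR2_PA, 0);
}

The only visible change for such callers is that the PSR juggling now happens in the out-of-line implementations rather than in header-inlined asm.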