Diffstat (limited to 'include/asm-mips')
53 files changed, 615 insertions, 791 deletions
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h index 45c706e34df..c6275088cf6 100644 --- a/include/asm-mips/addrspace.h +++ b/include/asm-mips/addrspace.h @@ -19,12 +19,16 @@ #define _ATYPE_ #define _ATYPE32_ #define _ATYPE64_ -#define _LLCONST_(x) x +#define _CONST64_(x) x #else #define _ATYPE_ __PTRDIFF_TYPE__ #define _ATYPE32_ int -#define _ATYPE64_ long long -#define _LLCONST_(x) x ## LL +#define _ATYPE64_ __s64 +#ifdef CONFIG_64BIT +#define _CONST64_(x) x ## L +#else +#define _CONST64_(x) x ## LL +#endif #endif /* @@ -48,7 +52,7 @@ */ #define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) #define XPHYSADDR(a) ((_ACAST64_(a)) & \ - _LLCONST_(0x000000ffffffffff)) + _CONST64_(0x000000ffffffffff)) #ifdef CONFIG_64BIT @@ -57,14 +61,14 @@ * The compatibility segments use the full 64-bit sign extended value. Note * the R8000 doesn't have them so don't reference these in generic MIPS code. */ -#define XKUSEG _LLCONST_(0x0000000000000000) -#define XKSSEG _LLCONST_(0x4000000000000000) -#define XKPHYS _LLCONST_(0x8000000000000000) -#define XKSEG _LLCONST_(0xc000000000000000) -#define CKSEG0 _LLCONST_(0xffffffff80000000) -#define CKSEG1 _LLCONST_(0xffffffffa0000000) -#define CKSSEG _LLCONST_(0xffffffffc0000000) -#define CKSEG3 _LLCONST_(0xffffffffe0000000) +#define XKUSEG _CONST64_(0x0000000000000000) +#define XKSSEG _CONST64_(0x4000000000000000) +#define XKPHYS _CONST64_(0x8000000000000000) +#define XKSEG _CONST64_(0xc000000000000000) +#define CKSEG0 _CONST64_(0xffffffff80000000) +#define CKSEG1 _CONST64_(0xffffffffa0000000) +#define CKSSEG _CONST64_(0xffffffffc0000000) +#define CKSEG3 _CONST64_(0xffffffffe0000000) #define CKSEG0ADDR(a) (CPHYSADDR(a) | CKSEG0) #define CKSEG1ADDR(a) (CPHYSADDR(a) | CKSEG1) @@ -122,7 +126,7 @@ #define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED,(p)) #define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE,(p)) #define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK) -#define PHYS_TO_XKPHYS(cm,a) (_LLCONST_(0x8000000000000000) | \ +#define PHYS_TO_XKPHYS(cm,a) (_CONST64_(0x8000000000000000) | \ ((cm)<<59) | (a)) #if defined (CONFIG_CPU_R4300) \ @@ -132,20 +136,20 @@ || defined (CONFIG_CPU_NEVADA) \ || defined (CONFIG_CPU_TX49XX) \ || defined (CONFIG_CPU_MIPS64) -#define TO_PHYS_MASK _LLCONST_(0x0000000fffffffff) /* 2^^36 - 1 */ +#define TO_PHYS_MASK _CONST64_(0x0000000fffffffff) /* 2^^36 - 1 */ #endif #if defined (CONFIG_CPU_R8000) /* We keep KUSIZE consistent with R4000 for now (2^^40) instead of (2^^48) */ -#define TO_PHYS_MASK _LLCONST_(0x000000ffffffffff) /* 2^^40 - 1 */ +#define TO_PHYS_MASK _CONST64_(0x000000ffffffffff) /* 2^^40 - 1 */ #endif #if defined (CONFIG_CPU_R10000) -#define TO_PHYS_MASK _LLCONST_(0x000000ffffffffff) /* 2^^40 - 1 */ +#define TO_PHYS_MASK _CONST64_(0x000000ffffffffff) /* 2^^40 - 1 */ #endif #if defined(CONFIG_CPU_SB1) || defined(CONFIG_CPU_SB1A) -#define TO_PHYS_MASK _LLCONST_(0x00000fffffffffff) /* 2^^44 - 1 */ +#define TO_PHYS_MASK _CONST64_(0x00000fffffffffff) /* 2^^44 - 1 */ #endif #ifndef CONFIG_CPU_R8000 @@ -155,7 +159,7 @@ * in order to catch bugs in the source code. 
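As an aside on the _LLCONST_ -> _CONST64_ rename above: under CONFIG_64BIT the macro now appends a plain L suffix, so segment bases such as CKSEG1 are ordinary long constants on 64-bit kernels and long long only where unavoidable. A minimal sketch of a caller (hypothetical helper, not part of the patch):

/* Hypothetical illustration: build a cached XKPHYS address from a
 * physical address on a 64-bit kernel. PHYS_TO_XKPHYS() expands
 * through _CONST64_(), so the constant is a plain long here. */
static inline unsigned long demo_phys_to_cached_xkphys(unsigned long pa)
{
        return PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, pa);
}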
*/ -#define COMPAT_K1BASE32 _LLCONST_(0xffffffffa0000000) +#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000) #define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */ #endif diff --git a/include/asm-mips/asm.h b/include/asm-mips/asm.h index e3038a4599e..838eb3144d8 100644 --- a/include/asm-mips/asm.h +++ b/include/asm-mips/asm.h @@ -344,6 +344,7 @@ symbol = value #define PTR_L lw #define PTR_S sw #define PTR_LA la +#define PTR_LI li #define PTR_SLL sll #define PTR_SLLV sllv #define PTR_SRL srl @@ -368,6 +369,7 @@ symbol = value #define PTR_L ld #define PTR_S sd #define PTR_LA dla +#define PTR_LI dli #define PTR_SLL dsll #define PTR_SLLV dsllv #define PTR_SRL dsrl diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h index e64abc0d822..c1a2409bb52 100644 --- a/include/asm-mips/atomic.h +++ b/include/asm-mips/atomic.h @@ -9,20 +9,13 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle + * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle */ - -/* - * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in - * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the - * main big wrapper ... - */ -#include <linux/spinlock.h> - #ifndef _ASM_ATOMIC_H #define _ASM_ATOMIC_H #include <linux/irqflags.h> +#include <asm/barrier.h> #include <asm/cpu-features.h> #include <asm/war.h> @@ -138,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -148,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) " sc %0, %2 \n" " beqzl %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -163,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) " sc %0, %2 \n" " beqz %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -178,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -185,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -195,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) " sc %0, %2 \n" " beqzl %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -210,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) " sc %0, %2 \n" " beqz %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -225,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -240,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -253,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) " beqzl %0, 1b \n" " subu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -272,7 +270,6 @@ static __inline__ int 
atomic_sub_if_positive(int i, atomic_t * v) " beqz %0, 1b \n" " subu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -289,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -383,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) #ifdef CONFIG_64BIT -typedef struct { volatile __s64 counter; } atomic64_t; +typedef struct { volatile long counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } @@ -492,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -502,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqzl %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -517,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqz %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -532,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -539,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -549,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqzl %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -564,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqz %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -579,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -594,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -607,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " beqzl %0, 1b \n" " dsubu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -626,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " beqz %0, 1b \n" " dsubu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -643,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h new file mode 100644 index 00000000000..ed82631b001 --- /dev/null +++ b/include/asm-mips/barrier.h @@ -0,0 +1,132 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
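The atomic.h hunks above all follow one scheme: the sync formerly hard-coded inside each ll/sc loop becomes an smp_mb() before and after the operation, so value-returning atomics keep full-barrier semantics on weakly ordered SMP while other configurations emit no sync at all. A hedged sketch of what a caller may rely on afterwards (the demo_* names are hypothetical):

static atomic_t demo_ready = ATOMIC_INIT(0);
static int demo_payload;

static void demo_publish(int value)
{
        demo_payload = value;
        /* atomic_add_return() is a full barrier on both sides, so the
         * payload store is ordered before the counter becomes visible. */
        atomic_add_return(1, &demo_ready);
}

static int demo_consume(void)
{
        if (!atomic_read(&demo_ready))
                return -1;
        smp_rmb();      /* pairs with the barrier in demo_publish() */
        return demo_payload;
}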
+ * + * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +/* + * read_barrier_depends - Flush all pending reads that subsequent reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data returned by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. + * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies.
+ */ + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#ifdef CONFIG_CPU_HAS_SYNC +#define __sync() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + ".set mips2\n\t" \ + "sync\n\t" \ + ".set pop" \ + : /* no output */ \ + : /* no input */ \ + : "memory") +#else +#define __sync() do { } while(0) +#endif + +#define __fast_iob() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + "lw $0,%0\n\t" \ + "nop\n\t" \ + ".set pop" \ + : /* no output */ \ + : "m" (*(int *)CKSEG1) \ + : "memory") + +#define fast_wmb() __sync() +#define fast_rmb() __sync() +#define fast_mb() __sync() +#define fast_iob() \ + do { \ + __sync(); \ + __fast_iob(); \ + } while (0) + +#ifdef CONFIG_CPU_HAS_WB + +#include <asm/wbflush.h> + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() wbflush() +#define iob() wbflush() + +#else /* !CONFIG_CPU_HAS_WB */ + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() fast_mb() +#define iob() fast_iob() + +#endif /* !CONFIG_CPU_HAS_WB */ + +#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) +#define __WEAK_ORDERING_MB " sync \n" +#else +#define __WEAK_ORDERING_MB " \n" +#endif + +#define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") +#define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") +#define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") + +#define set_mb(var, value) \ + do { var = value; smp_mb(); } while (0) + +#endif /* __ASM_BARRIER_H */ diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index 1bb89c5a10e..06445de1324 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h @@ -3,38 +3,34 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org) + * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org) * Copyright (c) 1999, 2000 Silicon Graphics, Inc. */ #ifndef _ASM_BITOPS_H #define _ASM_BITOPS_H #include <linux/compiler.h> +#include <linux/irqflags.h> #include <linux/types.h> +#include <asm/barrier.h> #include <asm/bug.h> #include <asm/byteorder.h> /* sigh ... */ #include <asm/cpu-features.h> +#include <asm/sgidefs.h> +#include <asm/war.h> #if (_MIPS_SZLONG == 32) #define SZLONG_LOG 5 #define SZLONG_MASK 31UL #define __LL "ll " #define __SC "sc " -#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x)) #elif (_MIPS_SZLONG == 64) #define SZLONG_LOG 6 #define SZLONG_MASK 63UL #define __LL "lld " #define __SC "scd " -#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x)) #endif -#ifdef __KERNEL__ - -#include <linux/irqflags.h> -#include <asm/sgidefs.h> -#include <asm/war.h> - /* * clear_bit() doesn't provide any barrier for the compiler. */ @@ -42,20 +38,6 @@ #define smp_mb__after_clear_bit() smp_mb() /* - * Only disable interrupt for kernel mode stuff to keep usermode stuff - * that dares to use kernel include files alive. 
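The new barrier.h thus reduces the SMP barriers to two cases: with CONFIG_WEAK_ORDERING and CONFIG_SMP both set, __WEAK_ORDERING_MB emits a real sync; otherwise it is blank and smp_mb()/smp_rmb()/smp_wmb() degrade to pure compiler barriers via the "memory" clobber. A small usage sketch under those assumptions (demo names hypothetical):

static int demo_wake_flag;

static void demo_waker(void)
{
        /* set_mb(): plain store followed by smp_mb(); on !SMP or
         * strongly ordered CPUs this costs only a compiler barrier. */
        set_mb(demo_wake_flag, 1);
}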
- */ - -#define __bi_flags unsigned long flags -#define __bi_local_irq_save(x) local_irq_save(x) -#define __bi_local_irq_restore(x) local_irq_restore(x) -#else -#define __bi_flags -#define __bi_local_irq_save(x) -#define __bi_local_irq_restore(x) -#endif /* __KERNEL__ */ - -/* * set_bit - Atomically set a bit in memory * @nr: the bit to set * @addr: the address to start counting from @@ -93,13 +75,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } else { volatile unsigned long *a = addr; unsigned long mask; - __bi_flags; + unsigned long flags; a += nr >> SZLONG_LOG; mask = 1UL << (nr & SZLONG_MASK); - __bi_local_irq_save(flags); + local_irq_save(flags); *a |= mask; - __bi_local_irq_restore(flags); + local_irq_restore(flags); } } @@ -141,13 +123,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } else { volatile unsigned long *a = addr; unsigned long mask; - __bi_flags; + unsigned long flags; a += nr >> SZLONG_LOG; mask = 1UL << (nr & SZLONG_MASK); - __bi_local_irq_save(flags); + local_irq_save(flags); *a &= ~mask; - __bi_local_irq_restore(flags); + local_irq_restore(flags); } } @@ -191,13 +173,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) } else { volatile unsigned long *a = addr; unsigned long mask; - __bi_flags; + unsigned long flags; a += nr >> SZLONG_LOG; mask = 1UL << (nr & SZLONG_MASK); - __bi_local_irq_save(flags); + local_irq_save(flags); *a ^= mask; - __bi_local_irq_restore(flags); + local_irq_restore(flags); } } @@ -223,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -245,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqz %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set pop \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -258,17 +234,19 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *a = addr; unsigned long mask; int retval; - __bi_flags; + unsigned long flags; a += nr >> SZLONG_LOG; mask = 1UL << (nr & SZLONG_MASK); - __bi_local_irq_save(flags); + local_irq_save(flags); retval = (mask & *a) != 0; *a |= mask; - __bi_local_irq_restore(flags); + local_irq_restore(flags); return retval; } + + smp_mb(); } /* @@ -294,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -317,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqz %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set pop \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -330,17 +302,19 @@ static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *a = addr; unsigned long mask; int retval; - __bi_flags; + unsigned long flags; a += nr >> SZLONG_LOG; mask = 1UL << (nr & SZLONG_MASK); - __bi_local_irq_save(flags); + local_irq_save(flags); retval = (mask & *a) != 0; *a &= ~mask; - __bi_local_irq_restore(flags); + local_irq_restore(flags); return retval; } + + smp_mb(); } /* @@ -365,9 +339,6 @@ static inline int 
test_and_change_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -387,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr, " " __SC "\t%2, %1 \n" " beqz %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set pop \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -399,22 +367,20 @@ static inline int test_and_change_bit(unsigned long nr, } else { volatile unsigned long *a = addr; unsigned long mask, retval; - __bi_flags; + unsigned long flags; a += nr >> SZLONG_LOG; mask = 1UL << (nr & SZLONG_MASK); - __bi_local_irq_save(flags); + local_irq_save(flags); retval = (mask & *a) != 0; *a ^= mask; - __bi_local_irq_restore(flags); + local_irq_restore(flags); return retval; } -} -#undef __bi_flags -#undef __bi_local_irq_save -#undef __bi_local_irq_restore + smp_mb(); +} #include <asm-generic/bitops/non-atomic.h> diff --git a/include/asm-mips/bootinfo.h b/include/asm-mips/bootinfo.h index 1e5ccdad3b0..8e321f53a38 100644 --- a/include/asm-mips/bootinfo.h +++ b/include/asm-mips/bootinfo.h @@ -131,6 +131,7 @@ #define MACH_PHILIPS_NINO 0 /* Nino */ #define MACH_PHILIPS_VELO 1 /* Velo */ #define MACH_PHILIPS_JBS 2 /* JBS */ +#define MACH_PHILIPS_STB810 3 /* STB810 */ /* * Valid machtype for group SIBYTE diff --git a/include/asm-mips/bug.h b/include/asm-mips/bug.h index 7b4739dc8f3..4d560a53394 100644 --- a/include/asm-mips/bug.h +++ b/include/asm-mips/bug.h @@ -1,6 +1,7 @@ #ifndef __ASM_BUG_H #define __ASM_BUG_H +#include <asm/sgidefs.h> #ifdef CONFIG_BUG @@ -13,6 +14,17 @@ do { \ #define HAVE_ARCH_BUG +#if (_MIPS_ISA > _MIPS_ISA_MIPS1) + +#define BUG_ON(condition) \ +do { \ + __asm__ __volatile__("tne $0, %0" : : "r" (condition)); \ +} while (0) + +#define HAVE_ARCH_BUG_ON + +#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */ + #endif #include <asm-generic/bug.h> diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h index 9ab59e2bb23..e3c9925876a 100644 --- a/include/asm-mips/cacheflush.h +++ b/include/asm-mips/cacheflush.h @@ -55,24 +55,13 @@ extern void (*flush_icache_range)(unsigned long start, unsigned long end); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() -static inline void copy_to_user_page(struct vm_area_struct *vma, +extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, - unsigned long len) -{ - if (cpu_has_dc_aliases) - flush_cache_page(vma, vaddr, page_to_pfn(page)); - memcpy(dst, src, len); - __flush_icache_page(vma, page); -} + unsigned long len); -static inline void copy_from_user_page(struct vm_area_struct *vma, +extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, - unsigned long len) -{ - if (cpu_has_dc_aliases) - flush_cache_page(vma, vaddr, page_to_pfn(page)); - memcpy(dst, src, len); -} + unsigned long len); extern void (*flush_cache_sigtramp)(unsigned long addr); extern void (*flush_icache_all)(void); diff --git a/include/asm-mips/checksum.h b/include/asm-mips/checksum.h index a5e6050ec0f..9b768c3b96b 100644 --- a/include/asm-mips/checksum.h +++ b/include/asm-mips/checksum.h @@ -27,23 +27,22 @@ * * it's best to have buff aligned on a 32-bit boundary */ -unsigned int csum_partial(const unsigned char *buff, int 
len, unsigned int sum); +__wsum csum_partial(const void *buff, int len, __wsum sum); /* * this is a new version of the above that records errors it finds in *errp, * but continues and zeros the rest of the buffer. */ -unsigned int csum_partial_copy_from_user(const unsigned char __user *src, - unsigned char *dst, int len, - unsigned int sum, int *errp); +__wsum csum_partial_copy_from_user(const void __user *src, + void *dst, int len, + __wsum sum, int *errp); /* * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static inline unsigned int csum_and_copy_to_user (const unsigned char *src, - unsigned char __user *dst, - int len, int sum, +static inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, + int len, __wsum sum, int *err_ptr) { might_sleep(); @@ -51,7 +50,7 @@ static inline unsigned int csum_and_copy_to_user (const unsigned char *src, if (copy_to_user(dst, src, len)) { *err_ptr = -EFAULT; - return -1; + return (__force __wsum)-1; } return sum; @@ -61,13 +60,13 @@ static inline unsigned int csum_and_copy_to_user (const unsigned char *src, * the same as csum_partial, but copies from user space (but on MIPS * we have just one address space, so this is identical to the above) */ -unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, - int len, unsigned int sum); +__wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum); /* * Fold a partial checksum without adding pseudo headers */ -static inline unsigned short int csum_fold(unsigned int sum) +static inline __sum16 csum_fold(__wsum sum) { __asm__( " .set push # csum_fold\n" @@ -82,7 +81,7 @@ static inline unsigned short int csum_fold(unsigned int sum) : "=r" (sum) : "0" (sum)); - return sum; + return (__force __sum16)sum; } /* @@ -92,10 +91,10 @@ static inline unsigned short int csum_fold(unsigned int sum) * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by * Arnt Gulbrandsen. 
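The csum_fold() assembly above performs the usual two-step fold of a 32-bit partial sum into 16 bits; a portable C rendering for reference (a sketch, not the code the patch installs):

static inline __sum16 demo_csum_fold(__wsum sum)
{
        u32 s = (__force u32)sum;

        s = (s & 0xffff) + (s >> 16);   /* add the two 16-bit halves */
        s = (s & 0xffff) + (s >> 16);   /* fold any carry back in */
        return (__force __sum16)~s;     /* one's complement of the fold */
}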
*/ -static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { - unsigned int *word = (unsigned int *) iph; - unsigned int *stop = word + ihl; + const unsigned int *word = iph; + const unsigned int *stop = word + ihl; unsigned int csum; int carry; @@ -123,9 +122,9 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) return csum_fold(csum); } -static inline unsigned int csum_tcpudp_nofold(unsigned long saddr, - unsigned long daddr, unsigned short len, unsigned short proto, - unsigned int sum) +static inline __wsum csum_tcpudp_nofold(__be32 saddr, + __be32 daddr, unsigned short len, unsigned short proto, + __wsum sum) { __asm__( " .set push # csum_tcpudp_nofold\n" @@ -155,9 +154,9 @@ static inline unsigned int csum_tcpudp_nofold(unsigned long saddr, : "=r" (sum) : "0" (daddr), "r"(saddr), #ifdef __MIPSEL__ - "r" (((unsigned long)htons(len)<<16) + proto*256), + "r" ((proto + len) << 8), #else - "r" (((unsigned long)(proto)<<16) + len), + "r" (proto + len), #endif "r" (sum)); @@ -168,11 +167,10 @@ static inline unsigned int csum_tcpudp_nofold(unsigned long saddr, * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ -static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, - unsigned long daddr, +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, unsigned short proto, - unsigned int sum) + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } @@ -181,17 +179,16 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ -static inline unsigned short ip_compute_csum(unsigned char * buff, int len) +static inline __sum16 ip_compute_csum(const void *buff, int len) { return csum_fold(csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM -static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, - struct in6_addr *daddr, - __u32 len, - unsigned short proto, - unsigned int sum) +static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) { __asm__( " .set push # csum_ipv6_magic\n" diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h index 900f472fdd2..432653d7ae0 100644 --- a/include/asm-mips/compat.h +++ b/include/asm-mips/compat.h @@ -5,6 +5,7 @@ */ #include <linux/types.h> #include <asm/page.h> +#include <asm/ptrace.h> #define COMPAT_USER_HZ 100 @@ -32,6 +33,7 @@ typedef struct { s32 val[2]; } compat_fsid_t; typedef s32 compat_timer_t; +typedef s32 compat_key_t; typedef s32 compat_int_t; typedef s32 compat_long_t; @@ -146,4 +148,71 @@ static inline void __user *compat_alloc_user_space(long len) return (void __user *) (regs->regs[29] - len); } +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + compat_mode_t mode; + unsigned short seq; + unsigned short __pad2; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_time_t sem_otime; + compat_time_t sem_ctime; + compat_ulong_t sem_nsems; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; +#ifndef CONFIG_CPU_LITTLE_ENDIAN + 
compat_ulong_t __unused1; +#endif + compat_time_t msg_stime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused1; +#endif +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused2; +#endif + compat_time_t msg_rtime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused2; +#endif +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused3; +#endif + compat_time_t msg_ctime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused3; +#endif + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_time_t shm_atime; + compat_time_t shm_dtime; + compat_time_t shm_ctime; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + #endif /* _ASM_COMPAT_H */ diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h index a2f0c8ea916..610d0cdeaa9 100644 --- a/include/asm-mips/cpu-info.h +++ b/include/asm-mips/cpu-info.h @@ -22,12 +22,12 @@ * Descriptor for a cache */ struct cache_desc { - unsigned short linesz; /* Size of line in bytes */ - unsigned short ways; /* Number of ways */ - unsigned short sets; /* Number of lines per set */ unsigned int waysize; /* Bytes per way */ - unsigned int waybit; /* Bits to select in a cache set */ - unsigned int flags; /* Flags describing cache properties */ + unsigned short sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char waybit; /* Bits to select in a cache set */ + unsigned char flags; /* Flags describing cache properties */ }; /* diff --git a/include/asm-mips/dec/kn02.h b/include/asm-mips/dec/kn02.h index 8319ad77b25..93430b5f472 100644 --- a/include/asm-mips/dec/kn02.h +++ b/include/asm-mips/dec/kn02.h @@ -82,11 +82,9 @@ #ifndef __ASSEMBLY__ -#include <linux/spinlock.h> #include <linux/types.h> extern u32 cached_kn02_csr; -extern spinlock_t kn02_lock; extern void init_kn02_irqs(int base); #endif diff --git a/include/asm-mips/device.h b/include/asm-mips/device.h new file mode 100644 index 00000000000..d8f9872b0e2 --- /dev/null +++ b/include/asm-mips/device.h @@ -0,0 +1,7 @@ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + */ +#include <asm-generic/device.h> + diff --git a/include/asm-mips/div64.h b/include/asm-mips/div64.h index 5f7dcf5452e..d107832de1b 100644 --- a/include/asm-mips/div64.h +++ b/include/asm-mips/div64.h @@ -83,27 +83,6 @@ #if (_MIPS_SZLONG == 64) /* - * Don't use this one in new code - */ -#define do_div64_32(res, high, low, base) ({ \ - unsigned int __quot, __mod; \ - unsigned long __div; \ - unsigned int __low, __high, __base; \ - \ - __high = (high); \ - __low = (low); \ - __div = __high; \ - __div = __div << 32 | __low; \ - __base = (base); \ - \ - __mod = __div % __base; \ - __div = __div / __base; \ - \ - __quot = __div; \ - (res) = __quot; \ - __mod; }) - -/* * Hey, we're already 64-bit, no * need to play games.. 
*/ diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h index 43288634c38..236d1a467cc 100644 --- a/include/asm-mips/dma-mapping.h +++ b/include/asm-mips/dma-mapping.h @@ -63,9 +63,9 @@ dma_get_cache_alignment(void) return 128; } -extern int dma_is_consistent(dma_addr_t dma_addr); +extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr); -extern void dma_cache_sync(void *vaddr, size_t size, +extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h index e85849ac165..23f789c8084 100644 --- a/include/asm-mips/dma.h +++ b/include/asm-mips/dma.h @@ -74,7 +74,9 @@ * */ +#ifndef GENERIC_ISA_DMA_SUPPORT_BROKEN #define MAX_DMA_CHANNELS 8 +#endif /* * The maximum address in KSEG0 that we can perform a DMA transfer to on this diff --git a/include/asm-mips/fixmap.h b/include/asm-mips/fixmap.h index 6959bdb5931..02c8a13fc89 100644 --- a/include/asm-mips/fixmap.h +++ b/include/asm-mips/fixmap.h @@ -45,8 +45,16 @@ * fix-mapped? */ enum fixed_addresses { +#define FIX_N_COLOURS 8 + FIX_CMAP_BEGIN, +#ifdef CONFIG_MIPS_MT_SMTC + FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS), +#else + FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, +#endif #ifdef CONFIG_HIGHMEM - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ + /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_BEGIN = FIX_CMAP_END + 1, FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, #endif __end_of_fixed_addresses @@ -70,9 +78,9 @@ extern void __set_fixmap (enum fixed_addresses idx, * at the top of mem.. */ #if defined(CONFIG_CPU_TX39XX) || defined(CONFIG_CPU_TX49XX) -#define FIXADDR_TOP (0xff000000UL - 0x2000) +#define FIXADDR_TOP ((unsigned long)(long)(int)(0xff000000 - 0x20000)) #else -#define FIXADDR_TOP (0xffffe000UL) +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) #endif #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h index ed023eae067..47e5679c235 100644 --- a/include/asm-mips/futex.h +++ b/include/asm-mips/futex.h @@ -1,19 +1,21 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org) + */ #ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H #ifdef __KERNEL__ #include <linux/futex.h> +#include <asm/barrier.h> #include <asm/errno.h> #include <asm/uaccess.h> #include <asm/war.h> -#ifdef CONFIG_SMP -#define __FUTEX_SMP_SYNC " sync \n" -#else -#define __FUTEX_SMP_SYNC -#endif - #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ { \ if (cpu_has_llsc && R10000_LLSC_WAR) { \ @@ -27,7 +29,7 @@ " .set mips3 \n" \ "2: sc $1, %2 \n" \ " beqzl $1, 1b \n" \ - __FUTEX_SMP_SYNC \ + __WEAK_ORDERING_MB \ "3: \n" \ " .set pop \n" \ " .set mips0 \n" \ @@ -53,7 +55,7 @@ " .set mips3 \n" \ "2: sc $1, %2 \n" \ " beqz $1, 1b \n" \ - __FUTEX_SMP_SYNC \ + __WEAK_ORDERING_MB \ "3: \n" \ " .set pop \n" \ " .set mips0 \n" \ @@ -86,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - inc_preempt_count(); + pagefault_disable(); switch (op) { case FUTEX_OP_SET: @@ -113,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ret = -ENOSYS; } - dec_preempt_count(); + pagefault_enable(); if (!ret) { switch (cmp) { @@ -150,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set mips3 \n" "2: sc $1, %1 \n" " beqzl $1, 1b \n" - __FUTEX_SMP_SYNC + __WEAK_ORDERING_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" @@ -177,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set mips3 \n" "2: sc $1, %1 \n" " beqz $1, 1b \n" - __FUTEX_SMP_SYNC + __WEAK_ORDERING_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" diff --git a/include/asm-mips/gt64120.h b/include/asm-mips/gt64120.h index 2edd171bb6c..4bf8e28f885 100644 --- a/include/asm-mips/gt64120.h +++ b/include/asm-mips/gt64120.h @@ -451,6 +451,13 @@ #define GT_SDRAM_OPMODE_OP_MODE 3 #define GT_SDRAM_OPMODE_OP_CBR 4 +#define GT_TC_CONTROL_ENTC0_SHF 0 +#define GT_TC_CONTROL_ENTC0_MSK (MSK(1) << GT_TC_CONTROL_ENTC0_SHF) +#define GT_TC_CONTROL_ENTC0_BIT GT_TC_CONTROL_ENTC0_MSK +#define GT_TC_CONTROL_SELTC0_SHF 1 +#define GT_TC_CONTROL_SELTC0_MSK (MSK(1) << GT_TC_CONTROL_SELTC0_SHF) +#define GT_TC_CONTROL_SELTC0_BIT GT_TC_CONTROL_SELTC0_MSK + #define GT_PCI0_BARE_SWSCS3BOOTDIS_SHF 0 #define GT_PCI0_BARE_SWSCS3BOOTDIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS3BOOTDIS_SHF) @@ -523,6 +530,13 @@ #define GT_PCI0_CMD_SWORDSWAP_MSK (MSK(1) << GT_PCI0_CMD_SWORDSWAP_SHF) #define GT_PCI0_CMD_SWORDSWAP_BIT GT_PCI0_CMD_SWORDSWAP_MSK +#define GT_INTR_T0EXP_SHF 8 +#define GT_INTR_T0EXP_MSK (MSK(1) << GT_INTR_T0EXP_SHF) +#define GT_INTR_T0EXP_BIT GT_INTR_T0EXP_MSK +#define GT_INTR_RETRYCTR0_SHF 20 +#define GT_INTR_RETRYCTR0_MSK (MSK(1) << GT_INTR_RETRYCTR0_SHF) +#define GT_INTR_RETRYCTR0_BIT GT_INTR_RETRYCTR0_MSK + /* * Misc */ diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h index c976bfaaba8..f8c8182f7f2 100644 --- a/include/asm-mips/highmem.h +++ b/include/asm-mips/highmem.h @@ -21,6 +21,7 @@ #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/uaccess.h> #include <asm/kmap_types.h> /* undef for production */ @@ -70,11 +71,16 @@ static inline void *kmap(struct page *page) static inline void *kmap_atomic(struct page *page, enum km_type type) { + pagefault_disable(); return page_address(page); } -static inline void kunmap_atomic(void *kvaddr, enum km_type type) { } -#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) +static inline void kunmap_atomic(void *kvaddr, enum km_type type) +{ + pagefault_enable(); +} + +#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h index 0214abe3f0a..4df8d8b118c 100644 --- a/include/asm-mips/i8259.h +++ b/include/asm-mips/i8259.h @@ -19,10 +19,31 @@ #include <asm/io.h> +/* i8259A PIC registers */ +#define PIC_MASTER_CMD 0x20 +#define PIC_MASTER_IMR 0x21 +#define PIC_MASTER_ISR PIC_MASTER_CMD +#define PIC_MASTER_POLL PIC_MASTER_ISR +#define PIC_MASTER_OCW3 PIC_MASTER_ISR +#define PIC_SLAVE_CMD 0xa0 +#define PIC_SLAVE_IMR 0xa1 + +/* i8259A PIC related value */ +#define PIC_CASCADE_IR 2 +#define MASTER_ICW4_DEFAULT 0x01 +#define SLAVE_ICW4_DEFAULT 0x01 +#define PIC_ICW4_AEOI 2 + extern spinlock_t i8259A_lock; +extern void init_8259A(int auto_eoi); +extern void enable_8259A_irq(unsigned int 
irq); +extern void disable_8259A_irq(unsigned int irq); + extern void init_i8259_irqs(void); +#define I8259A_IRQ_BASE 0 + /* * Do the traditional i8259 interrupt polling thing. This is for the few * cases where no better interrupt acknowledge method is available and we @@ -35,15 +56,15 @@ static inline int i8259_irq(void) spin_lock(&i8259A_lock); /* Perform an interrupt acknowledge cycle on controller 1. */ - outb(0x0C, 0x20); /* prepare for poll */ - irq = inb(0x20) & 7; - if (irq == 2) { + outb(0x0C, PIC_MASTER_CMD); /* prepare for poll */ + irq = inb(PIC_MASTER_CMD) & 7; + if (irq == PIC_CASCADE_IR) { /* * Interrupt is cascaded so perform interrupt * acknowledge on controller 2. */ - outb(0x0C, 0xA0); /* prepare for poll */ - irq = (inb(0xA0) & 7) + 8; + outb(0x0C, PIC_SLAVE_CMD); /* prepare for poll */ + irq = (inb(PIC_SLAVE_CMD) & 7) + 8; } if (unlikely(irq == 7)) { @@ -54,14 +75,14 @@ static inline int i8259_irq(void) * significant bit is not set then there is no valid * interrupt. */ - outb(0x0B, 0x20); /* ISR register */ - if(~inb(0x20) & 0x80) + outb(0x0B, PIC_MASTER_ISR); /* ISR register */ + if(~inb(PIC_MASTER_ISR) & 0x80) irq = -1; } spin_unlock(&i8259A_lock); - return irq; + return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq; } #endif /* _ASM_I8259_H */ diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h index c2d124badbe..d77b657c09c 100644 --- a/include/asm-mips/io.h +++ b/include/asm-mips/io.h @@ -113,7 +113,7 @@ static inline void set_io_port_base(unsigned long base) * almost all conceivable cases a device driver should not be using * this function */ -static inline unsigned long virt_to_phys(volatile void * address) +static inline unsigned long virt_to_phys(volatile const void *address) { return (unsigned long)address - PAGE_OFFSET; } @@ -172,7 +172,7 @@ extern unsigned long isa_slot_offset; #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); -extern void __iounmap(volatile void __iomem *addr); +extern void __iounmap(const volatile void __iomem *addr); static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, unsigned long flags) @@ -279,7 +279,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, #define ioremap_uncached_accelerated(offset, size) \ __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED) -static inline void iounmap(volatile void __iomem *addr) +static inline void iounmap(const volatile void __iomem *addr) { #define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1) diff --git a/include/asm-mips/irq.h b/include/asm-mips/irq.h index 0ce2a80b689..67657089efa 100644 --- a/include/asm-mips/irq.h +++ b/include/asm-mips/irq.h @@ -24,8 +24,6 @@ static inline int irq_canonicalize(int irq) #define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... 
*/ #endif -extern asmlinkage unsigned int do_IRQ(unsigned int irq); - #ifdef CONFIG_MIPS_MT_SMTC /* * Clear interrupt mask handling "backstop" if irq_hwmask @@ -43,8 +41,6 @@ do { \ #define __DO_IRQ_SMTC_HOOK() do { } while (0) #endif -#ifdef CONFIG_PREEMPT - /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific @@ -57,12 +53,10 @@ do { \ do { \ irq_enter(); \ __DO_IRQ_SMTC_HOOK(); \ - __do_IRQ((irq)); \ + generic_handle_irq(irq); \ irq_exit(); \ } while (0) -#endif - extern void arch_init_irq(void); extern void spurious_interrupt(void); @@ -74,4 +68,8 @@ extern int setup_irq_smtc(unsigned int irq, struct irqaction * new, unsigned long hwmask); #endif /* CONFIG_MIPS_MT_SMTC */ +extern int allocate_irqno(void); +extern void alloc_legacy_irqno(void); +extern void free_irqno(unsigned int irq); + #endif /* _ASM_IRQ_H */ diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h new file mode 100644 index 00000000000..b25267ebcb0 --- /dev/null +++ b/include/asm-mips/kexec.h @@ -0,0 +1,32 @@ +/* + * kexec.h for kexec + * Created by <nschichan@corp.free.fr> on Thu Oct 12 14:59:34 2006 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#ifndef _MIPS_KEXEC +# define _MIPS_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000) + /* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000) + +#define KEXEC_CONTROL_CODE_SIZE 4096 + +/* The native architecture */ +#define KEXEC_ARCH KEXEC_ARCH_MIPS + +#define MAX_NOTE_BYTES 1024 + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* !_MIPS_KEXEC */ diff --git a/include/asm-mips/mach-au1x00/au1xxx_ide.h b/include/asm-mips/mach-au1x00/au1xxx_ide.h index 301e7130077..e9fa252f8a3 100644 --- a/include/asm-mips/mach-au1x00/au1xxx_ide.h +++ b/include/asm-mips/mach-au1x00/au1xxx_ide.h @@ -170,10 +170,8 @@ int __init auide_probe(void); static int auide_dma_host_on(ide_drive_t *drive); static int auide_dma_lostirq(ide_drive_t *drive); static int auide_dma_on(ide_drive_t *drive); - static void auide_ddma_tx_callback(int irq, void *param, - struct pt_regs *regs); - static void auide_ddma_rx_callback(int irq, void *param, - struct pt_regs *regs); + static void auide_ddma_tx_callback(int irq, void *param); + static void auide_ddma_rx_callback(int irq, void *param); static int auide_dma_off_quietly(ide_drive_t *drive); #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ diff --git a/include/asm-mips/mach-cobalt/cobalt.h b/include/asm-mips/mach-cobalt/cobalt.h index b3c5ecbec03..00b0fc68d5c 100644 --- a/include/asm-mips/mach-cobalt/cobalt.h +++ b/include/asm-mips/mach-cobalt/cobalt.h @@ -67,34 +67,9 @@ #define COBALT_BRD_ID_QUBE2 0x5 #define COBALT_BRD_ID_RAQ2 0x6 -/* - * Galileo chipset access macros for the Cobalt. The base address for - * the GT64111 chip is 0x14000000 - * - * Most of this really should go into a separate GT64111 header file. 
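With the CONFIG_PREEMPT special case removed, do_IRQ() above is the single dispatch path: irq_enter(), the SMTC hook, generic_handle_irq(), irq_exit(). A board's interrupt entry then reduces to something like the following sketch (the function name is hypothetical; i8259_irq() and spurious_interrupt() are the helpers declared earlier in this diff):

asmlinkage void demo_plat_irq_dispatch(void)
{
        int irq = i8259_irq();  /* poll the cascaded PICs, -1 if spurious */

        if (likely(irq >= 0))
                do_IRQ(irq);    /* irq_enter + generic_handle_irq + irq_exit */
        else
                spurious_interrupt();
}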
- */ -#define GT64111_IO_BASE 0x10000000UL -#define GT64111_IO_END 0x11ffffffUL -#define GT64111_MEM_BASE 0x12000000UL -#define GT64111_MEM_END 0x13ffffffUL -#define GT64111_BASE 0x14000000UL -#define GALILEO_REG(ofs) CKSEG1ADDR(GT64111_BASE + (unsigned long)(ofs)) - -#define GALILEO_INL(port) (*(volatile unsigned int *) GALILEO_REG(port)) -#define GALILEO_OUTL(val, port) \ -do { \ - *(volatile unsigned int *) GALILEO_REG(port) = (val); \ -} while (0) - -#define GALILEO_INTR_T0EXP (1 << 8) -#define GALILEO_INTR_RETRY_CTR (1 << 20) - -#define GALILEO_ENTC0 0x01 -#define GALILEO_SELTC0 0x02 - #define PCI_CFG_SET(devfn,where) \ - GALILEO_OUTL((0x80000000 | (PCI_SLOT (devfn) << 11) | \ - (PCI_FUNC (devfn) << 8) | (where)), GT_PCI0_CFGADDR_OFS) + GT_WRITE(GT_PCI0_CFGADDR_OFS, (0x80000000 | (PCI_SLOT (devfn) << 11) | \ + (PCI_FUNC (devfn) << 8) | (where))) #define COBALT_LED_PORT (*(volatile unsigned char *) CKSEG1ADDR(0x1c000000)) # define COBALT_LED_BAR_LEFT (1 << 0) /* Qube */ diff --git a/include/asm-mips/mach-cobalt/mach-gt64120.h b/include/asm-mips/mach-cobalt/mach-gt64120.h index 587fc4378f4..ae9c5523c7e 100644 --- a/include/asm-mips/mach-cobalt/mach-gt64120.h +++ b/include/asm-mips/mach-cobalt/mach-gt64120.h @@ -1 +1,27 @@ -/* there's something here ... in the dark */ +/* + * Copyright (C) 2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef _COBALT_MACH_GT64120_H +#define _COBALT_MACH_GT64120_H + +/* + * Cobalt uses GT64111. GT64111 is almost the same as GT64120. 
+ */ + +#define GT64120_BASE CKSEG1ADDR(GT_DEF_BASE) + +#endif /* _COBALT_MACH_GT64120_H */ diff --git a/include/asm-mips/mach-ip27/irq.h b/include/asm-mips/mach-ip27/irq.h index 806213ce31b..25f0c3f39ad 100644 --- a/include/asm-mips/mach-ip27/irq.h +++ b/include/asm-mips/mach-ip27/irq.h @@ -10,8 +10,6 @@ #ifndef __ASM_MACH_IP27_IRQ_H #define __ASM_MACH_IP27_IRQ_H -#include <asm/sn/arch.h> - /* * A hardwired interrupt number is completly stupid for this system - a * large configuration might have thousands if not tenthousands of diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h index a13b715fd9c..44790fdc5d0 100644 --- a/include/asm-mips/mach-ip27/topology.h +++ b/include/asm-mips/mach-ip27/topology.h @@ -1,7 +1,6 @@ #ifndef _ASM_MACH_TOPOLOGY_H #define _ASM_MACH_TOPOLOGY_H 1 -#include <asm/sn/arch.h> #include <asm/sn/hub.h> #include <asm/mmzone.h> diff --git a/include/asm-mips/mach-rm200/cpu-feature-overrides.h b/include/asm-mips/mach-rm/cpu-feature-overrides.h index 11410ae10d3..11410ae10d3 100644 --- a/include/asm-mips/mach-rm200/cpu-feature-overrides.h +++ b/include/asm-mips/mach-rm/cpu-feature-overrides.h diff --git a/include/asm-mips/mach-rm200/mc146818rtc.h b/include/asm-mips/mach-rm/mc146818rtc.h index d37ae68dc6a..d37ae68dc6a 100644 --- a/include/asm-mips/mach-rm200/mc146818rtc.h +++ b/include/asm-mips/mach-rm/mc146818rtc.h diff --git a/include/asm-mips/mach-rm200/timex.h b/include/asm-mips/mach-rm/timex.h index 11ff6cb0f21..11ff6cb0f21 100644 --- a/include/asm-mips/mach-rm200/timex.h +++ b/include/asm-mips/mach-rm/timex.h diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h index f637ce70758..3e9468f424f 100644 --- a/include/asm-mips/mipsmtregs.h +++ b/include/asm-mips/mipsmtregs.h @@ -352,6 +352,8 @@ do { \ #define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val) #define read_vpe_c0_vpeconf0() mftc0(1, 2) #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val) +#define read_vpe_c0_count() mftc0(9, 0) +#define write_vpe_c0_count(val) mttc0(9, 0, val) #define read_vpe_c0_status() mftc0(12, 0) #define write_vpe_c0_status(val) mttc0(12, 0, val) #define read_vpe_c0_cause() mftc0(13, 0) diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h index 1f318d70799..9985cb7c16e 100644 --- a/include/asm-mips/mipsregs.h +++ b/include/asm-mips/mipsregs.h @@ -545,62 +545,6 @@ #define MIPS_FPIR_L (_ULCAST_(1) << 21) #define MIPS_FPIR_F64 (_ULCAST_(1) << 22) -/* - * R10000 performance counter definitions. - * - * FIXME: The R10000 performance counter opens a nice way to implement CPU - * time accounting with a precission of one cycle. I don't have - * R10000 silicon but just a manual, so ... 
- */ - -/* - * Events counted by counter #0 - */ -#define CE0_CYCLES 0 -#define CE0_INSN_ISSUED 1 -#define CE0_LPSC_ISSUED 2 -#define CE0_S_ISSUED 3 -#define CE0_SC_ISSUED 4 -#define CE0_SC_FAILED 5 -#define CE0_BRANCH_DECODED 6 -#define CE0_QW_WB_SECONDARY 7 -#define CE0_CORRECTED_ECC_ERRORS 8 -#define CE0_ICACHE_MISSES 9 -#define CE0_SCACHE_I_MISSES 10 -#define CE0_SCACHE_I_WAY_MISSPREDICTED 11 -#define CE0_EXT_INTERVENTIONS_REQ 12 -#define CE0_EXT_INVALIDATE_REQ 13 -#define CE0_VIRTUAL_COHERENCY_COND 14 -#define CE0_INSN_GRADUATED 15 - -/* - * Events counted by counter #1 - */ -#define CE1_CYCLES 0 -#define CE1_INSN_GRADUATED 1 -#define CE1_LPSC_GRADUATED 2 -#define CE1_S_GRADUATED 3 -#define CE1_SC_GRADUATED 4 -#define CE1_FP_INSN_GRADUATED 5 -#define CE1_QW_WB_PRIMARY 6 -#define CE1_TLB_REFILL 7 -#define CE1_BRANCH_MISSPREDICTED 8 -#define CE1_DCACHE_MISS 9 -#define CE1_SCACHE_D_MISSES 10 -#define CE1_SCACHE_D_WAY_MISSPREDICTED 11 -#define CE1_EXT_INTERVENTION_HITS 12 -#define CE1_EXT_INVALIDATE_REQ 13 -#define CE1_SP_HINT_TO_CEXCL_SC_BLOCKS 14 -#define CE1_SP_HINT_TO_SHARED_SC_BLOCKS 15 - -/* - * These flags define in which privilege mode the counters count events - */ -#define CEB_USER 8 /* Count events in user mode, EXL = ERL = 0 */ -#define CEB_SUPERVISOR 4 /* Count events in supvervisor mode EXL = ERL = 0 */ -#define CEB_KERNEL 2 /* Count events in kernel mode EXL = ERL = 0 */ -#define CEB_EXL 1 /* Count events with EXL = 1, ERL = 0 */ - #ifndef __ASSEMBLY__ /* diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index 85b258ee709..0dc1a45c27e 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h @@ -34,7 +34,9 @@ #ifndef __ASSEMBLY__ +#include <linux/pfn.h> #include <asm/cpu-features.h> +#include <asm/io.h> extern void clear_page(void * page); extern void copy_page(void * to, void * from); @@ -134,8 +136,14 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) -#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) -#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) +#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64) +#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0) +#else +#define __pa_page_offset(x) PAGE_OFFSET +#endif +#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x)) +#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) @@ -160,8 +168,8 @@ typedef struct { unsigned long pgprot; } pgprot_t; #endif -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) +#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) +#define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr))) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) diff --git a/include/asm-mips/pci.h b/include/asm-mips/pci.h index c4d68bebdca..7f0f120ca07 100644 --- a/include/asm-mips/pci.h +++ b/include/asm-mips/pci.h @@ -187,4 +187,10 @@ static inline void pcibios_add_platform_entries(struct pci_dev *dev) /* Do platform specific device initialization at pci_enable_device() time */ extern int pcibios_plat_dev_init(struct pci_dev *dev); +/* Chances are this interrupt is wired PC-style ... 
*/ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return channel ? 15 : 14; +} + #endif /* _ASM_PCI_H */ diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h index 582c1fe6cc4..af121c67dc7 100644 --- a/include/asm-mips/pgalloc.h +++ b/include/asm-mips/pgalloc.h @@ -48,7 +48,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); if (ret) { - init = pgd_offset(&init_mm, 0); + init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long)ret); memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h index d20f2e9b28b..2fbd47eba32 100644 --- a/include/asm-mips/pgtable-32.h +++ b/include/asm-mips/pgtable-32.h @@ -156,9 +156,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #define __pte_offset(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset(dir, address) \ - ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h index d05fb6f38aa..a5b18710b6a 100644 --- a/include/asm-mips/pgtable-64.h +++ b/include/asm-mips/pgtable-64.h @@ -14,6 +14,7 @@ #include <asm/addrspace.h> #include <asm/page.h> #include <asm/cachectl.h> +#include <asm/fixmap.h> #include <asm-generic/pgtable-nopud.h> @@ -103,6 +104,13 @@ #define VMALLOC_START MAP_BASE #define VMALLOC_END \ (VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE) +#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) && \ + VMALLOC_START != CKSSEG +/* Load modules into 32bit-compatible segment. */ +#define MODULE_START CKSSEG +#define MODULE_END (FIXADDR_START-2*PAGE_SIZE) +extern pgd_t module_pg_dir[PTRS_PER_PGD]; +#endif #define pte_ERROR(e) \ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) @@ -174,7 +182,12 @@ static inline void pud_clear(pud_t *pudp) #define __pmd_offset(address) pmd_index(address) /* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, 0) +#ifdef MODULE_START +#define pgd_offset_k(address) \ + ((address) >= MODULE_START ? 
module_pg_dir : pgd_offset(&init_mm, 0UL)) +#else +#define pgd_offset_k(address) pgd_offset(&init_mm, 0UL) +#endif #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) @@ -199,9 +212,9 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) #define __pte_offset(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset(dir, address) \ - ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) #define pte_offset_map_nested(dir, address) \ diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index 1ca4d1e185c..f2e1325fec6 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -67,7 +67,7 @@ extern unsigned long empty_zero_page; extern unsigned long zero_page_mask; #define ZERO_PAGE(vaddr) \ - (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) + (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))) #define __HAVE_ARCH_MOVE_PTE #define move_pte(pte, prot, old_addr, new_addr) \ diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h index 5f3a9075cd2..8a1f2b6f04a 100644 --- a/include/asm-mips/ptrace.h +++ b/include/asm-mips/ptrace.h @@ -80,10 +80,16 @@ struct pt_regs { #define instruction_pointer(regs) ((regs)->cp0_epc) #define profile_pc(regs) instruction_pointer(regs) -extern void show_regs(struct pt_regs *); - extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit); +extern NORET_TYPE void die(const char *, struct pt_regs *); + +static inline void die_if_kernel(const char *str, struct pt_regs *regs) +{ + if (unlikely(!user_mode(regs))) + die(str, regs); +} + #endif #endif /* _ASM_PTRACE_H */ diff --git a/include/asm-mips/setup.h b/include/asm-mips/setup.h index 737fa4a6912..70009a90263 100644 --- a/include/asm-mips/setup.h +++ b/include/asm-mips/setup.h @@ -1,8 +1,6 @@ -#ifdef __KERNEL__ #ifndef _MIPS_SETUP_H #define _MIPS_SETUP_H #define COMMAND_LINE_SIZE 256 #endif /* __SETUP_H */ -#endif /* __KERNEL__ */ diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h index b09e16c93ca..2ba6988ddc8 100644 --- a/include/asm-mips/sibyte/sb1250.h +++ b/include/asm-mips/sibyte/sb1250.h @@ -51,8 +51,8 @@ extern void sb1250_mask_irq(int cpu, int irq); extern void sb1250_unmask_irq(int cpu, int irq); extern void sb1250_smp_finish(void); +extern void bcm1480_hpt_setup(void); extern void bcm1480_time_init(void); -extern unsigned long bcm1480_gettimeoffset(void); extern void bcm1480_mask_irq(int cpu, int irq); extern void bcm1480_unmask_irq(int cpu, int irq); extern void bcm1480_smp_finish(void); diff --git a/include/asm-mips/sn/arch.h b/include/asm-mips/sn/arch.h index 51174af6ac5..da523de628b 100644 --- a/include/asm-mips/sn/arch.h +++ b/include/asm-mips/sn/arch.h @@ -18,7 +18,6 @@ #endif typedef u64 hubreg_t; -typedef u64 nic_t; #define cputonasid(cpu) (cpu_data[(cpu)].p_nasid) #define cputoslice(cpu) (cpu_data[(cpu)].p_slice) diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h index b63cd0655b3..82aeb9e322d 100644 --- a/include/asm-mips/sn/klconfig.h +++ 
b/include/asm-mips/sn/klconfig.h @@ -61,6 +61,8 @@ #endif /* CONFIG_SGI_IP35 */ #endif /* CONFIG_SGI_IP27 || CONFIG_SGI_IP35 */ +typedef u64 nic_t; + #define KLCFGINFO_MAGIC 0xbeedbabe typedef s32 klconf_off_t; @@ -176,7 +178,7 @@ typedef struct kl_config_hdr { /* --- New Macros for the changed kl_config_hdr_t structure --- */ #define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\ - (unsigned long)_k + (_k->ch_malloc_hdr_off))) + ((unsigned long)_k + (_k->ch_malloc_hdr_off))) #define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n)) diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index c8d5587467b..fc3217fc111 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h @@ -3,12 +3,13 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1999, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000, 06 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #ifndef _ASM_SPINLOCK_H #define _ASM_SPINLOCK_H +#include <asm/barrier.h> #include <asm/war.h> /* @@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) " sc %1, %0 \n" " beqzl %1, 1b \n" " nop \n" - " sync \n" " .set reorder \n" : "=m" (lock->lock), "=&r" (tmp) : "m" (lock->lock) @@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) " li %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (lock->lock), "=&r" (tmp) : "m" (lock->lock) : "memory"); } + + smp_mb(); } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { + smp_mb(); + __asm__ __volatile__( " .set noreorder # __raw_spin_unlock \n" - " sync \n" " sw $0, %0 \n" " .set\treorder \n" : "=m" (lock->lock) @@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) " beqzl %2, 1b \n" " nop \n" " andi %2, %0, 1 \n" - " sync \n" " .set reorder" : "=&r" (temp), "=m" (lock->lock), "=&r" (res) : "m" (lock->lock) @@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) " sc %2, %1 \n" " beqz %2, 1b \n" " andi %2, %0, 1 \n" - " sync \n" " .set reorder" : "=&r" (temp), "=m" (lock->lock), "=&r" (res) : "m" (lock->lock) : "memory"); } + smp_mb(); + return res == 0; } @@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) " sc %1, %0 \n" " beqzl %1, 1b \n" " nop \n" - " sync \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) @@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) " addu %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) : "memory"); } + + smp_mb(); } /* Note the use of sub, not subu which will make the kernel die with an @@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) { unsigned int tmp; + smp_mb(); + if (R10000_LLSC_WAR) { __asm__ __volatile__( "1: ll %1, %2 # __raw_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - " sync \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) : "memory"); @@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) " sub %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) @@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) @@ 
-221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) : "memory"); } + + smp_mb(); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { + smp_mb(); + __asm__ __volatile__( - " sync # __raw_write_unlock \n" + " # __raw_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) @@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) " bnez %1, 2f \n" " addu %1, 1 \n" " sc %1, %0 \n" - " beqzl %1, 1b \n" " .set reorder \n" -#ifdef CONFIG_SMP - " sync \n" -#endif + " beqzl %1, 1b \n" + " nop \n" + __WEAK_ORDERING_MB " li %2, 1 \n" "2: \n" : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) @@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) " addu %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" + " nop \n" " .set reorder \n" -#ifdef CONFIG_SMP - " sync \n" -#endif + __WEAK_ORDERING_MB " li %2, 1 \n" "2: \n" : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) @@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - " sync \n" + " nop \n" + __WEAK_ORDERING_MB " li %2, 1 \n" " .set reorder \n" "2: \n" @@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" + __WEAK_ORDERING_MB " li %2, 1 \n" " .set reorder \n" "2: \n" diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index dcb4701d572..5e1289c85ed 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle * Copyright (C) 1996 by Paul M. Antoine * Copyright (C) 1999 Silicon Graphics * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com @@ -16,132 +16,11 @@ #include <linux/irqflags.h> #include <asm/addrspace.h> +#include <asm/barrier.h> #include <asm/cpu-features.h> #include <asm/dsp.h> -#include <asm/ptrace.h> #include <asm/war.h> -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). 
However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. - */ - -#define read_barrier_depends() do { } while(0) - -#ifdef CONFIG_CPU_HAS_SYNC -#define __sync() \ - __asm__ __volatile__( \ - ".set push\n\t" \ - ".set noreorder\n\t" \ - ".set mips2\n\t" \ - "sync\n\t" \ - ".set pop" \ - : /* no output */ \ - : /* no input */ \ - : "memory") -#else -#define __sync() do { } while(0) -#endif - -#define __fast_iob() \ - __asm__ __volatile__( \ - ".set push\n\t" \ - ".set noreorder\n\t" \ - "lw $0,%0\n\t" \ - "nop\n\t" \ - ".set pop" \ - : /* no output */ \ - : "m" (*(int *)CKSEG1) \ - : "memory") - -#define fast_wmb() __sync() -#define fast_rmb() __sync() -#define fast_mb() __sync() -#define fast_iob() \ - do { \ - __sync(); \ - __fast_iob(); \ - } while (0) - -#ifdef CONFIG_CPU_HAS_WB - -#include <asm/wbflush.h> - -#define wmb() fast_wmb() -#define rmb() fast_rmb() -#define mb() wbflush() -#define iob() wbflush() - -#else /* !CONFIG_CPU_HAS_WB */ - -#define wmb() fast_wmb() -#define rmb() fast_rmb() -#define mb() fast_mb() -#define iob() fast_iob() - -#endif /* !CONFIG_CPU_HAS_WB */ - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) /* * switch_to(n) should switch tasks to task nr n, first @@ -217,9 +96,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " .set mips3 \n" " sc %2, %1 \n" " beqzl %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -235,9 +111,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " .set mips3 \n" " sc %2, %1 \n" " beqz %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -251,6 +124,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } @@ -268,9 +143,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " move %2, %z4 \n" " scd %2, %1 \n" " beqzl %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -284,9 +156,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " move %2, %z4 \n" " scd %2, %1 \n" " beqz %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -300,6 +169,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } #else @@ -345,9 +216,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, " .set mips3 \n" " sc $1, %1 \n" " beqzl $1, 1b \n" -#ifdef 
CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -365,9 +233,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, " .set mips3 \n" " sc $1, %1 \n" " beqz $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -383,6 +248,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } @@ -392,7 +259,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, { __u64 retval; - if (cpu_has_llsc) { + if (cpu_has_llsc && R10000_LLSC_WAR) { __asm__ __volatile__( " .set push \n" " .set noat \n" @@ -402,9 +269,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, " move $1, %z4 \n" " scd $1, %1 \n" " beqzl $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -420,9 +284,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, " move $1, %z4 \n" " scd $1, %1 \n" " beqz $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -438,6 +299,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } #else @@ -472,14 +335,6 @@ extern void *set_except_vector(int n, void *addr); extern unsigned long ebase; extern void per_cpu_trap_init(void); -extern NORET_TYPE void die(const char *, struct pt_regs *); - -static inline void die_if_kernel(const char *str, struct pt_regs *regs) -{ - if (unlikely(!user_mode(regs))) - die(str, regs); -} - extern int stop_a_enabled; /* diff --git a/include/asm-mips/termbits.h b/include/asm-mips/termbits.h index b62ec7c521c..0bbe07b42a0 100644 --- a/include/asm-mips/termbits.h +++ b/include/asm-mips/termbits.h @@ -30,6 +30,17 @@ struct termios { cc_t c_cc[NCCS]; /* control characters */ }; +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + /* c_cc characters */ #define VINTR 0 /* Interrupt character [ISIG]. */ #define VQUIT 1 /* Quit character [ISIG]. */ diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h index 28512ba2266..a632cef830a 100644 --- a/include/asm-mips/time.h +++ b/include/asm-mips/time.h @@ -21,6 +21,7 @@ #include <linux/ptrace.h> #include <linux/rtc.h> #include <linux/spinlock.h> +#include <linux/clocksource.h> extern spinlock_t rtc_lock; @@ -44,11 +45,10 @@ extern int (*mips_timer_state)(void); extern void (*mips_timer_ack)(void); /* - * High precision timer functions. - * If mips_hpt_read is NULL, an R4k-compatible timer setup is attempted. + * High precision timer clocksource. + * If .read is NULL, an R4k-compatible timer setup is attempted. */ -extern unsigned int (*mips_hpt_read)(void); -extern void (*mips_hpt_init)(unsigned int); +extern struct clocksource clocksource_mips; /* * to_tm() converts system time back to (year, mon, day, hour, min, sec). @@ -58,13 +58,6 @@ extern void (*mips_hpt_init)(unsigned int); extern void to_tm(unsigned long tim, struct rtc_time *tm); /* - * do_gettimeoffset(). 
By default, this func pointer points to - * do_null_gettimeoffset(), which leads to the same resolution as HZ. - * Higher resolution versions are available, which give ~1us resolution. - */ -extern unsigned long (*do_gettimeoffset)(void); - -/* * high-level timer interrupt routines. */ extern irqreturn_t timer_interrupt(int irq, void *dev_id); diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h index 2b52e180c6f..63a13c5bd83 100644 --- a/include/asm-mips/types.h +++ b/include/asm-mips/types.h @@ -93,16 +93,6 @@ typedef unsigned long long phys_t; typedef unsigned long phys_t; #endif -#ifdef CONFIG_LBD -typedef u64 sector_t; -#define HAVE_SECTOR_T -#endif - -#ifdef CONFIG_LSF -typedef u64 blkcnt_t; -#define HAVE_BLKCNT_T -#endif - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index 685c91467e6..696cff39a1d 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h @@ -331,16 +331,19 @@ #define __NR_move_pages (__NR_Linux + 308) #define __NR_set_robust_list (__NR_Linux + 309) #define __NR_get_robust_list (__NR_Linux + 310) +#define __NR_kexec_load (__NR_Linux + 311) +#define __NR_getcpu (__NR_Linux + 312) +#define __NR_epoll_pwait (__NR_Linux + 313) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 310 +#define __NR_Linux_syscalls 313 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 310 +#define __NR_O32_Linux_syscalls 313 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -618,16 +621,19 @@ #define __NR_move_pages (__NR_Linux + 267) #define __NR_set_robust_list (__NR_Linux + 268) #define __NR_get_robust_list (__NR_Linux + 269) +#define __NR_kexec_load (__NR_Linux + 270) +#define __NR_getcpu (__NR_Linux + 271) +#define __NR_epoll_pwait (__NR_Linux + 272) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 269 +#define __NR_Linux_syscalls 272 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 269 +#define __NR_64_Linux_syscalls 272 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -909,283 +915,25 @@ #define __NR_move_pages (__NR_Linux + 271) #define __NR_set_robust_list (__NR_Linux + 272) #define __NR_get_robust_list (__NR_Linux + 273) +#define __NR_kexec_load (__NR_Linux + 274) +#define __NR_getcpu (__NR_Linux + 275) +#define __NR_epoll_pwait (__NR_Linux + 276) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 273 +#define __NR_Linux_syscalls 276 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 273 +#define __NR_N32_Linux_syscalls 276 #ifdef __KERNEL__ #ifndef __ASSEMBLY__ -/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ -#define _syscall0(type,name) \ -type name(void) \ -{ \ - register unsigned long __a3 asm("$7"); \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %2\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "=r" (__a3) \ - : "i" (__NR_##name) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -/* - * DANGER: This macro isn't usable for the pipe(2) call - * which has a unusual return convention. 
- */ -#define _syscall1(type,name,atype,a) \ -type name(atype a) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a3 asm("$7"); \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %3\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "=r" (__a3) \ - : "r" (__a0), "i" (__NR_##name) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#define _syscall2(type,name,atype,a,btype,b) \ -type name(atype a, btype b) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a3 asm("$7"); \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %4\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "=r" (__a3) \ - : "r" (__a0), "r" (__a1), "i" (__NR_##name) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#define _syscall3(type,name,atype,a,btype,b,ctype,c) \ -type name(atype a, btype b, ctype c) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a2 asm("$6") = (unsigned long) c; \ - register unsigned long __a3 asm("$7"); \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %5\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "=r" (__a3) \ - : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \ -type name(atype a, btype b, ctype c, dtype d) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a2 asm("$6") = (unsigned long) c; \ - register unsigned long __a3 asm("$7") = (unsigned long) d; \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %5\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "+r" (__a3) \ - : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#if (_MIPS_SIM == _MIPS_SIM_ABI32) - -/* - * Using those means your brain needs more than an oil change ;-) - */ - -#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \ -type name(atype a, btype b, ctype c, dtype d, etype e) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a2 asm("$6") = (unsigned long) c; \ - register unsigned long __a3 asm("$7") = (unsigned long) d; \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "lw\t$2, %6\n\t" \ - "subu\t$29, 32\n\t" \ - "sw\t$2, 16($29)\n\t" \ - "li\t$2, 
%5\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - "addiu\t$29, 32\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "+r" (__a3) \ - : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \ - "m" ((unsigned long)e) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \ -type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a2 asm("$6") = (unsigned long) c; \ - register unsigned long __a3 asm("$7") = (unsigned long) d; \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "lw\t$2, %6\n\t" \ - "lw\t$8, %7\n\t" \ - "subu\t$29, 32\n\t" \ - "sw\t$2, 16($29)\n\t" \ - "sw\t$8, 20($29)\n\t" \ - "li\t$2, %5\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - "addiu\t$29, 32\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "+r" (__a3) \ - : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \ - "m" ((unsigned long)e), "m" ((unsigned long)f) \ - : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */ - -#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) - -#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \ -type name (atype a,btype b,ctype c,dtype d,etype e) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a2 asm("$6") = (unsigned long) c; \ - register unsigned long __a3 asm("$7") = (unsigned long) d; \ - register unsigned long __a4 asm("$8") = (unsigned long) e; \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %6\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "+r" (__a3) \ - : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \ - : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \ -type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \ -{ \ - register unsigned long __a0 asm("$4") = (unsigned long) a; \ - register unsigned long __a1 asm("$5") = (unsigned long) b; \ - register unsigned long __a2 asm("$6") = (unsigned long) c; \ - register unsigned long __a3 asm("$7") = (unsigned long) d; \ - register unsigned long __a4 asm("$8") = (unsigned long) e; \ - register unsigned long __a5 asm("$9") = (unsigned long) f; \ - unsigned long __v0; \ - \ - __asm__ volatile ( \ - ".set\tnoreorder\n\t" \ - "li\t$2, %7\t\t\t# " #name "\n\t" \ - "syscall\n\t" \ - "move\t%0, $2\n\t" \ - ".set\treorder" \ - : "=&r" (__v0), "+r" (__a3) \ - : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \ - "i" (__NR_##name) \ - : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \ - "memory"); \ - \ - if (__a3 == 0) \ - return (type) __v0; \ - errno = __v0; \ - return (type) -1; \ -} - -#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) 
*/ - - +#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64 #define __ARCH_WANT_IPC_PARSE_VERSION #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM diff --git a/include/asm-mips/vr41xx/vr41xx.h b/include/asm-mips/vr41xx/vr41xx.h index dd3eb3dc588..88b492f6ea9 100644 --- a/include/asm-mips/vr41xx/vr41xx.h +++ b/include/asm-mips/vr41xx/vr41xx.h @@ -75,7 +75,7 @@ extern void vr41xx_mask_clock(vr41xx_clock_t clock); * Interrupt Control Unit */ extern int vr41xx_set_intassign(unsigned int irq, unsigned char intassign); -extern int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int, struct pt_regs *)); +extern int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int)); #define PIUINT_COMMAND 0x0040 #define PIUINT_DATA 0x0020 diff --git a/include/asm-mips/war.h b/include/asm-mips/war.h index 3ac146c019c..13a3502eef4 100644 --- a/include/asm-mips/war.h +++ b/include/asm-mips/war.h @@ -76,7 +76,7 @@ /* * But the RM200C seems to have been shipped only with V2.0 R4600s */ -#ifdef CONFIG_SNI_RM200_PCI +#ifdef CONFIG_SNI_RM #define R4600_V2_HIT_CACHEOP_WAR 1
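
The spinlock and rwlock hunks above all apply one pattern: the `sync` that used to sit inside (or directly after) each ll/sc retry loop is dropped, and the generic smp_mb() from the newly included <asm/barrier.h> is issued once after a lock is taken and once before it is released; the trylock paths embed the equivalent __WEAK_ORDERING_MB directly in the asm. A minimal userspace sketch of that acquire/release bracketing, using GCC's __sync builtins in place of the kernel's MIPS ll/sc assembly (the sketch_* names are illustrative, not kernel API):

/*
 * No barrier inside the retry loop; one full barrier on each side of
 * the critical section, mirroring the patched __raw_spin_lock/unlock.
 */
typedef struct {
	volatile unsigned int lock;
} sketch_spinlock_t;

static inline void sketch_spin_lock(sketch_spinlock_t *l)
{
	while (__sync_lock_test_and_set(&l->lock, 1))
		;				/* spin; the kernel uses ll/sc here */
	__sync_synchronize();			/* stands in for smp_mb() */
}

static inline void sketch_spin_unlock(sketch_spinlock_t *l)
{
	__sync_synchronize();			/* stands in for smp_mb() */
	l->lock = 0;				/* plain store, like "sw $0, %0" */
}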
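
atomic.h and system.h follow suit: atomic_add_return() and friends now execute smp_mb() both before and after the ll/sc sequence, while xchg() and cmpxchg() gain a single smp_mb() after the operation, replacing the CONFIG_SMP-conditional `sync` instructions. A sketch of the add_return shape, under the same assumptions as the lock sketch above:

static inline int sketch_atomic_add_return(int i, volatile int *v)
{
	int result;

	__sync_synchronize();			/* smp_mb() before the update */
	result = __sync_add_and_fetch(v, i);	/* ll/addu/sc retry loop in the kernel */
	__sync_synchronize();			/* smp_mb() after the update */

	return result;
}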
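
time.h retires the mips_hpt_read/mips_hpt_init function pointers in favour of a single shared clocksource_mips object that platforms fill in (an R4k-compatible setup is attempted when .read is left NULL). The toy model below shows only the shape of that migration; the struct is a self-contained stand-in, not the kernel's struct clocksource, whose exact field set varies by release:

#include <stdint.h>

struct sketch_clocksource {
	const char *name;
	int rating;
	uint32_t (*read)(void);		/* free-running counter, e.g. CP0 Count */
	uint32_t mask;
};

static uint32_t sketch_read_count(void)
{
	static uint32_t fake;
	return fake += 1000;		/* stand-in for reading the hardware counter */
}

static struct sketch_clocksource sketch_clocksource_mips = {
	.name	= "MIPS",
	.rating	= 200,
};

static void sketch_hpt_setup(void)
{
	/* what assigning mips_hpt_read used to accomplish */
	sketch_clocksource_mips.read = sketch_read_count;
	sketch_clocksource_mips.mask = 0xffffffffu;
}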
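
unistd.h wires up kexec_load, getcpu and epoll_pwait on all three MIPS ABIs and bumps the per-ABI syscall counts (310 to 313 for o32, 269 to 272 for 64-bit, 273 to 276 for N32). Each absolute number is the ABI base plus the relative slot; the short program below works them out from the values in the hunks:

#include <stdio.h>

int main(void)
{
	/* ABI bases from unistd.h */
	const int o32 = 4000, n64 = 5000, n32 = 6000;

	/* relative slots assigned by this patch */
	printf("kexec_load : o32=%d 64=%d n32=%d\n", o32 + 311, n64 + 270, n32 + 274);
	printf("getcpu     : o32=%d 64=%d n32=%d\n", o32 + 312, n64 + 271, n32 + 275);
	printf("epoll_pwait: o32=%d 64=%d n32=%d\n", o32 + 313, n64 + 272, n32 + 276);
	return 0;
}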
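
The _syscallN macro block removed from unistd.h was the old mechanism for userland to emit a raw MIPS `syscall` instruction with hand-written register constraints; its replacement is libc's generic syscall(2) wrapper. A sketch of invoking one of the newly added calls through it, assuming a libc whose headers already define SYS_getcpu:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned int cpu = 0, node = 0;

	/* getcpu(cpu, node, tcache); the cache argument may be NULL */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) != 0) {
		perror("getcpu");
		return 1;
	}
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}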