From fad57feba77d2e5b183e068cb6b90693e4567b40 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Wed, 12 Nov 2008 20:11:47 +0900 Subject: sh: dynamic ftrace support. First cut at dynamic ftrace support. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 3aed362c946..4cb5dbfc404 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -1,8 +1,29 @@ #ifndef __ASM_SH_FTRACE_H #define __ASM_SH_FTRACE_H +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ + #ifndef __ASSEMBLY__ extern void mcount(void); + +#define MCOUNT_ADDR ((long)(mcount)) + +#ifdef CONFIG_DYNAMIC_FTRACE +#define CALLER_ADDR ((long)(ftrace_caller)) +#define STUB_ADDR ((long)(ftrace_stub)) + +#define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) +#endif + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* 'addr' is the memory table address. */ + return addr; +} #endif +#endif /* CONFIG_FUNCTION_TRACER */ + #endif /* __ASM_SH_FTRACE_H */ -- cgit v1.2.3 From 16b529d1d78060254d5bc735390915ca5ccf13a1 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 20 Nov 2008 15:25:22 +0900 Subject: sh: Convert to generic bitops for IRQ-toggling implementation. Signed-off-by: Paul Mundt --- arch/sh/include/asm/bitops-grb.h | 3 ++ arch/sh/include/asm/bitops-irq.h | 91 --------------------------------------- arch/sh/include/asm/bitops-llsc.h | 2 + arch/sh/include/asm/bitops.h | 6 +-- 4 files changed, 7 insertions(+), 95 deletions(-) delete mode 100644 arch/sh/include/asm/bitops-irq.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/bitops-grb.h b/arch/sh/include/asm/bitops-grb.h index a5907b94395..e73af33acbf 100644 --- a/arch/sh/include/asm/bitops-grb.h +++ b/arch/sh/include/asm/bitops-grb.h @@ -166,4 +166,7 @@ static inline int test_and_change_bit(int nr, volatile void * addr) return retval; } + +#include + #endif /* __ASM_SH_BITOPS_GRB_H */ diff --git a/arch/sh/include/asm/bitops-irq.h b/arch/sh/include/asm/bitops-irq.h deleted file mode 100644 index 653a1275058..00000000000 --- a/arch/sh/include/asm/bitops-irq.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef __ASM_SH_BITOPS_IRQ_H -#define __ASM_SH_BITOPS_IRQ_H - -static inline void set_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a |= mask; - local_irq_restore(flags); -} - -static inline void clear_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a &= ~mask; - local_irq_restore(flags); -} - -static inline void change_bit(int nr, volatile void *addr) -{ - int mask; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - *a ^= mask; - local_irq_restore(flags); -} - -static inline int test_and_set_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a |= mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_clear_bit(int nr, volatile void *addr) -{ - int mask, retval; - 
volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a &= ~mask; - local_irq_restore(flags); - - return retval; -} - -static inline int test_and_change_bit(int nr, volatile void *addr) -{ - int mask, retval; - volatile unsigned int *a = addr; - unsigned long flags; - - a += nr >> 5; - mask = 1 << (nr & 0x1f); - local_irq_save(flags); - retval = (mask & *a) != 0; - *a ^= mask; - local_irq_restore(flags); - - return retval; -} - -#endif /* __ASM_SH_BITOPS_IRQ_H */ diff --git a/arch/sh/include/asm/bitops-llsc.h b/arch/sh/include/asm/bitops-llsc.h index 43b8e1a8239..1d2fc0b010a 100644 --- a/arch/sh/include/asm/bitops-llsc.h +++ b/arch/sh/include/asm/bitops-llsc.h @@ -141,4 +141,6 @@ static inline int test_and_change_bit(int nr, volatile void * addr) return retval != 0; } +#include + #endif /* __ASM_SH_BITOPS_LLSC_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 367930d8e5a..9b141e04d10 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -16,18 +16,16 @@ #elif defined(CONFIG_CPU_SH4A) #include #else -#include +#include +#include #endif - /* * clear_bit() doesn't provide any barrier for the compiler. */ #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -#include - #ifdef CONFIG_SUPERH32 static inline unsigned long ffz(unsigned long word) { -- cgit v1.2.3 From 0d5bbe0bc2583c4dc06ea00adccf07c3acd1481d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 25 Nov 2008 21:22:02 +0900 Subject: sh: Provide optimized non-atomic bitops for SH-2A. This ties in the new SH-2A 32-bit non-atomic bitops. Signed-off-by: Paul Mundt --- arch/sh/include/asm/bitops-op32.h | 142 ++++++++++++++++++++++++++++++++++++++ arch/sh/include/asm/bitops.h | 3 + 2 files changed, 145 insertions(+) create mode 100644 arch/sh/include/asm/bitops-op32.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h new file mode 100644 index 00000000000..f0ae7e9218e --- /dev/null +++ b/arch/sh/include/asm/bitops-op32.h @@ -0,0 +1,142 @@ +#ifndef __ASM_SH_BITOPS_OP32_H +#define __ASM_SH_BITOPS_OP32_H + +/* + * The bit modifying instructions on SH-2A are only capable of working + * with a 3-bit immediate, which signifies the shift position for the bit + * being worked on. + */ +#if defined(__BIG_ENDIAN) +#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) +#define BYTE_NUMBER(nr) ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE) +#define BYTE_OFFSET(nr) ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE) +#else +#define BYTE_NUMBER(nr) ((nr) / BITS_PER_BYTE) +#define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE) +#endif + +#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) + +static inline void __set_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bset.b %1, @(%O2,%0) ! __set_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; + } +} + +static inline void __clear_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bclr.b %1, @(%O2,%0) ! 
__clear_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), + "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; + } +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static inline void __change_bit(int nr, volatile unsigned long *addr) +{ + if (IS_IMMEDIATE(nr)) { + __asm__ __volatile__ ( + "bxor.b %1, @(%O2,%0) ! __change_bit\n\t" + : "+r" (addr) + : "i" (BYTE_OFFSET(nr)), + "i" (BYTE_NUMBER(nr)) + : "t", "memory" + ); + } else { + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p ^= mask; + } +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static inline int __test_and_change_bit(int nr, + volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline int test_bit(int nr, const volatile unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +#endif /* __ASM_SH_BITOPS_OP32_H */ diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 9b141e04d10..ebe595b7ab1 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -13,6 +13,9 @@ #ifdef CONFIG_GUSA_RB #include +#elif defined(CONFIG_CPU_SH2A) +#include +#include #elif defined(CONFIG_CPU_SH4A) #include #else -- cgit v1.2.3 From 8085ac753164f45fd23603e7cad85a4c985cbf75 Mon Sep 17 00:00:00 2001 From: Steve Glendinning Date: Sun, 23 Nov 2008 14:27:21 +0000 Subject: sh: Add platform-specific constants for SH7709 I'm using these constants in support of an in-house development board, and thought they may be useful to other users of SH7709. 
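As an illustration of how a board might use these, here is a minimal sketch that configures port A pin 0 as an output and drives it high. The 2-bits-per-pin PACR function-select encoding assumed below is illustrative only; check the SH7709 hardware manual for the real values.

	#include <asm/io.h>
	#include <cpu/gpio.h>

	static void __init example_init_pa0(void)
	{
		unsigned short pacr = __raw_readw(PORT_PACR);

		pacr &= ~0x0003;	/* clear the PA0 mode bits */
		pacr |= 0x0001;		/* assumed: 01 = port output */
		__raw_writew(pacr, PORT_PACR);

		/* drive PA0 high through the data register */
		__raw_writeb(__raw_readb(PORT_PADR) | 0x01, PORT_PADR);
	}
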
Signed-off-by: Steve Glendinning Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh3/cpu/gpio.h | 14 ++++++++++++++ arch/sh/include/mach-se/mach/se.h | 18 ++++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/cpu-sh3/cpu/gpio.h b/arch/sh/include/cpu-sh3/cpu/gpio.h index 4e53eb314b8..9a22b882f3d 100644 --- a/arch/sh/include/cpu-sh3/cpu/gpio.h +++ b/arch/sh/include/cpu-sh3/cpu/gpio.h @@ -62,6 +62,20 @@ #define PORT_PSELC 0xA4050128UL #define PORT_PSELD 0xA405012AUL +#elif defined(CONFIG_CPU_SUBTYPE_SH7709) + +/* Control registers */ +#define PORT_PACR 0xa4000100UL +#define PORT_PBCR 0xa4000102UL +#define PORT_PCCR 0xa4000104UL +#define PORT_PFCR 0xa400010aUL + +/* Data registers */ +#define PORT_PADR 0xa4000120UL +#define PORT_PBDR 0xa4000122UL +#define PORT_PCDR 0xa4000124UL +#define PORT_PFDR 0xa400012aUL + #endif #endif diff --git a/arch/sh/include/mach-se/mach/se.h b/arch/sh/include/mach-se/mach/se.h index eb23000e1bb..14be91c5a2f 100644 --- a/arch/sh/include/mach-se/mach/se.h +++ b/arch/sh/include/mach-se/mach/se.h @@ -68,6 +68,24 @@ #define BCR_ILCRF (PA_BCR + 10) #define BCR_ILCRG (PA_BCR + 12) +#if defined(CONFIG_CPU_SUBTYPE_SH7709) +#define INTC_IRR0 0xa4000004UL +#define INTC_IRR1 0xa4000006UL +#define INTC_IRR2 0xa4000008UL + +#define INTC_ICR0 0xfffffee0UL +#define INTC_ICR1 0xa4000010UL +#define INTC_ICR2 0xa4000012UL +#define INTC_INTER 0xa4000014UL + +#define INTC_IPRC 0xa4000016UL +#define INTC_IPRD 0xa4000018UL +#define INTC_IPRE 0xa400001aUL + +#define IRQ0_IRQ 32 +#define IRQ1_IRQ 33 +#endif + #if defined(CONFIG_CPU_SUBTYPE_SH7705) #define IRQ_STNIC 12 #define IRQ_CFCARD 14 -- cgit v1.2.3 From 0c9122323acb0c3410dfbd219cb47f4c2e9305e3 Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Tue, 25 Nov 2008 21:37:14 +0900 Subject: sh: Add SH-4A optimized fastpath mutex implementation. Add a fast mutex path implementation for the SH-4A architecture. Signed-off-by: Michael Trimarchi Signed-off-by: Paul Mundt --- arch/sh/include/asm/mutex-llsc.h | 107 +++++++++++++++++++++++++++++++++++++++ arch/sh/include/asm/mutex.h | 5 +- 2 files changed, 111 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/mutex-llsc.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h new file mode 100644 index 00000000000..7c75af5e734 --- /dev/null +++ b/arch/sh/include/asm/mutex-llsc.h @@ -0,0 +1,107 @@ +/* + * arch/sh/include/asm/mutex-llsc.h + * + * SH-4A optimized mutex locking primitives + * + * Please look into asm-generic/mutex-xchg.h for a formal definition. + */ +#ifndef __ASM_SH_MUTEX_LLSC_H +#define __ASM_SH_MUTEX_LLSC_H + +/* + * Attempting to lock a mutex on SH-4A is done as in the ARMv6+ architectures, + * with a bastardized atomic decrement (it is not a reliable atomic decrement, + * but it satisfies the defined semantics for our purpose, while being + * smaller and faster than a real atomic decrement or atomic swap). + * The idea is to attempt decrementing the lock value only once. If, once + * decremented, it isn't zero, or if its store-back fails due to a dispute + * on the exclusive store, we simply bail out immediately through the slow + * path, where the lock will be reattempted until it succeeds. 
+ */ +static inline void +__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int __res; + + __asm__ __volatile__ ( + "movli.l @%1, %0 \n" + "dt %0 \n" + "movco.l %0, @%1 \n" + : "=&z" (__res) + : "r" (&(count)->counter) + : "t"); + + if (unlikely(__res != 0)) + fail_fn(count); +} + +static inline int +__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int __res; + + __asm__ __volatile__ ( + "movli.l @%1, %0 \n" + "dt %0 \n" + "movco.l %0, @%1 \n" + : "=&z" (__res) + : "r" (&(count)->counter) + : "t"); + + if (unlikely(__res != 0)) + __res = fail_fn(count); + + return __res; +} + +static inline void +__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) +{ + int __res; + + __asm__ __volatile__ ( + "1: movli.l @%1, %0 \n\t" + "add #1, %0 \n\t" + "movco.l %0, @%1 \n\t" + "bf 1b\n\t" + : "=&z" (__res) + : "r" (&(count)->counter) + : "t"); + + if (unlikely(__res <= 0)) + fail_fn(count); +} + +/* + * If the unlock was done on a contended lock, or if the unlock simply fails + * then the mutex remains locked. + */ +#define __mutex_slowpath_needs_to_unlock() 1 + +/* + * For __mutex_fastpath_trylock we do an atomic decrement and check the + * result and put it in the __res variable. + */ +static inline int +__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) +{ + int __res, __orig; + + __asm__ __volatile__ ( + "1: movli.l @%2, %0 \n\t" + "dt %0 \n\t" + "movco.l %0,@%2 \n\t" + "bf 1b \n\t" + "cmp/eq #0,%0 \n\t" + "bt 2f \n\t" + "mov #0, %1 \n\t" + "bf 3f \n\t" + "2: mov #1, %1 \n\t" + "3: " + : "=&z" (__orig), "=&r" (__res) + : "r" (&count->counter) + : "t"); + + return __res; +} +#endif /* __ASM_SH_MUTEX_LLSC_H */ diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h index 458c1f7fbc1..d8e37716a4a 100644 --- a/arch/sh/include/asm/mutex.h +++ b/arch/sh/include/asm/mutex.h @@ -5,5 +5,8 @@ * implementation in place, or pick the atomic_xchg() based generic * implementation. (see asm-generic/mutex-xchg.h for details) */ - +#if defined(CONFIG_CPU_SH4A) +#include +#else #include +#endif -- cgit v1.2.3 From 716777db7270255f1f7210fd87a7188b08c9a267 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 25 Nov 2008 21:57:29 +0900 Subject: sh: P4 ioremap pass-through This patch adds a pass-through case when ioremapping P4 addresses. Addresses passed to ioremap() should be physical addresses, so the best option is usually to convert the virtual address to a physical address before calling ioremap. This will give you a virtual address in P2 which matches the physical address and this works well for most internal hardware blocks on the SuperH architecture. However, some hardware blocks must be accessed through P4. Converting the P4 address to a physical and then back to a P2 does not work. One example of this is the sh7722 TMU block, it must be accessed through P4. Without this patch P4 addresses will be mapped using PTEs which requires the page allocator to be up and running. 
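As a usage sketch (the register address below is a made-up P4 address, not one taken from this patch), the pass-through means the following works even before the page allocator is initialized:

	#include <asm/io.h>

	static void __iomem *example_map_p4_block(void)
	{
		/*
		 * 0xff000000 lies in P4, above P3_ADDR_MAX, so ioremap()
		 * now returns P4SEGADDR(0xff000000) directly instead of
		 * setting up page table entries.
		 */
		return ioremap(0xff000000, 0x1000);
	}
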
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/addrspace.h | 11 +++++++++++ arch/sh/include/asm/io.h | 4 ++++ 2 files changed, 15 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h index 2702d81bfc0..36736c7e93d 100644 --- a/arch/sh/include/asm/addrspace.h +++ b/arch/sh/include/asm/addrspace.h @@ -49,5 +49,16 @@ /* Check if an address can be reached in 29 bits */ #define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000) +#ifdef CONFIG_SH_STORE_QUEUES +/* + * This is a special case for the SH-4 store queues, as pages for this + * space still need to be faulted in before it's possible to flush the + * store queue cache for writeout to the remapped region. + */ +#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) +#else +#define P3_ADDR_MAX P4SEG +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_SH_ADDRSPACE_H */ diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 65eaae34e75..61f6dae4053 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -260,6 +260,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) return (void __iomem *)P2SEGADDR(offset); } + + /* P4 above the store queues are always mapped. */ + if (unlikely(offset >= P3_ADDR_MAX)) + return (void __iomem *)P4SEGADDR(offset); #endif return __ioremap(offset, size, flags); -- cgit v1.2.3 From 95b781c239f53b4c7ecaf2989404ec6379b2409b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 26 Nov 2008 00:29:58 +0900 Subject: sh: Provide optimized unaligned loads on SH-4A. This adds support for unaligned loads on SH-4A, using the SH-4A's neutered movua.l instruction. As movua.l is r0-inspired, stores are still handled through the packed struct. Based on asm-generic/unaligned.h by Harvey Harrison. Signed-off-by: Paul Mundt --- arch/sh/include/asm/unaligned-sh4a.h | 258 +++++++++++++++++++++++++++++++++++ arch/sh/include/asm/unaligned.h | 7 +- 2 files changed, 264 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/unaligned-sh4a.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h new file mode 100644 index 00000000000..d8f89770275 --- /dev/null +++ b/arch/sh/include/asm/unaligned-sh4a.h @@ -0,0 +1,258 @@ +#ifndef __ASM_SH_UNALIGNED_SH4A_H +#define __ASM_SH_UNALIGNED_SH4A_H + +/* + * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only. + * Support for 16 and 64-bit accesses is done through shifting and + * masking relative to the endianness. Unaligned stores are not supported + * by the instruction encoding, so these continue to use the packed + * struct. + * + * The same note as with the movli.l/movco.l pair applies here: as long + * as the load is guaranteed to be inlined, nothing else will hook into + * r0 and we get the return value for free. + * + * NOTE: Due to the fact we require r0 encoding, care should be taken to + * avoid mixing these heavily with other r0 consumers, such as the atomic + * ops. Failure to adhere to this can result in the compiler running out + * of spill registers and blowing up when building at low optimization + * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777. 
+ */ +#include +#include + +static __always_inline u32 __get_unaligned_cpu32(const u8 *p) +{ + unsigned long unaligned; + + __asm__ __volatile__ ( + "movua.l @%1, %0\n\t" + : "=z" (unaligned) + : "r" (p) + ); + + return unaligned; +} + +struct __una_u16 { u16 x __attribute__((packed)); }; +struct __una_u32 { u32 x __attribute__((packed)); }; +struct __una_u64 { u64 x __attribute__((packed)); }; + +static inline u16 __get_unaligned_cpu16(const u8 *p) +{ +#ifdef __LITTLE_ENDIAN + return __get_unaligned_cpu32(p) & 0xffff; +#else + return __get_unaligned_cpu32(p) >> 16; +#endif +} + +/* + * Even though movua.l supports auto-increment on the read side, it can + * only store to r0 due to instruction encoding constraints, so just let + * the compiler sort it out on its own. + */ +static inline u64 __get_unaligned_cpu64(const u8 *p) +{ +#ifdef __LITTLE_ENDIAN + return (u64)__get_unaligned_cpu32(p + 4) << 32 | + __get_unaligned_cpu32(p); +#else + return (u64)__get_unaligned_cpu32(p) << 32 | + __get_unaligned_cpu32(p + 4); +#endif +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpu(__get_unaligned_cpu16(p)); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpu(__get_unaligned_cpu32(p)); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpu(__get_unaligned_cpu64(p)); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpu(__get_unaligned_cpu16(p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpu(__get_unaligned_cpu32(p)); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpu(__get_unaligned_cpu64(p)); +} + +static inline void __put_le16_noalign(u8 *p, u16 val) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_le32_noalign(u8 *p, u32 val) +{ + __put_le16_noalign(p, val); + __put_le16_noalign(p + 2, val >> 16); +} + +static inline void __put_le64_noalign(u8 *p, u64 val) +{ + __put_le32_noalign(p, val); + __put_le32_noalign(p + 4, val >> 32); +} + +static inline void __put_be16_noalign(u8 *p, u16 val) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_be32_noalign(u8 *p, u32 val) +{ + __put_be16_noalign(p, val >> 16); + __put_be16_noalign(p + 2, val); +} + +static inline void __put_be64_noalign(u8 *p, u64 val) +{ + __put_be32_noalign(p, val >> 32); + __put_be32_noalign(p + 4, val); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u16 *)p)->x = val; +#else + __put_le16_noalign(p, val); +#endif +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u32 *)p)->x = val; +#else + __put_le32_noalign(p, val); +#endif +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ +#ifdef __LITTLE_ENDIAN + ((struct __una_u64 *)p)->x = val; +#else + __put_le64_noalign(p, val); +#endif +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u16 *)p)->x = val; +#else + __put_be16_noalign(p, val); +#endif +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u32 *)p)->x = val; +#else + __put_be32_noalign(p, val); +#endif +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ +#ifdef __BIG_ENDIAN + ((struct __una_u64 *)p)->x = val; +#else + __put_be64_noalign(p, val); +#endif +} + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void 
__bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#ifdef __LITTLE_ENDIAN +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#endif + +#endif /* __ASM_SH_UNALIGNED_SH4A_H */ diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h index c1641a01d50..8c0ad5e4487 100644 --- a/arch/sh/include/asm/unaligned.h +++ b/arch/sh/include/asm/unaligned.h @@ -1,7 +1,11 @@ #ifndef _ASM_SH_UNALIGNED_H #define _ASM_SH_UNALIGNED_H -/* SH can't handle unaligned accesses. */ +#ifdef CONFIG_CPU_SH4A +/* SH-4A can handle unaligned loads in a relatively neutered fashion. */ +#include +#else +/* Otherwise, SH can't handle unaligned accesses. */ #ifdef __LITTLE_ENDIAN__ # include # include @@ -15,5 +19,6 @@ # define get_unaligned __get_unaligned_be # define put_unaligned __put_unaligned_be #endif +#endif #endif /* _ASM_SH_UNALIGNED_H */ -- cgit v1.2.3 From 9cfc9a9b6fff9ea7a19814b4472b3cb18b7bbdcc Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 26 Nov 2008 14:31:03 +0900 Subject: sh: Add a simple code dumper for SUPERH32 show_regs(). This implements a simple show_code() that is in turn plugged in to show_regs() to provide minimal code dumping at the end of the trace. Built on top of a simple instruction disassembler derived from the binutils opcode table. 
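The interface itself is a single call; a minimal sketch of its use (the wrapper function here is illustrative, show_regs() now invokes it internally):

	#include <asm/processor.h>
	#include <asm/ptrace.h>

	/* Dump the instructions around regs->pc, as show_regs() now does
	 * at the end of its output. */
	static void example_dump_code(struct pt_regs *regs)
	{
		show_code(regs);
	}
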
Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_32.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index a46a0207e97..7b14f0cff9b 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -175,6 +175,7 @@ static __inline__ void enable_fpu(void) void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); +void show_code(struct pt_regs *regs); extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) -- cgit v1.2.3 From eb67cf14ae5c21609c200859d6f3eba71c591569 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 26 Nov 2008 15:47:44 +0900 Subject: sh: Consolidate cpu_relax()/cpu_sleep() definitions across _32/_64. Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor.h | 3 +++ arch/sh/include/asm/processor_32.h | 3 --- arch/sh/include/asm/processor_64.h | 2 -- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 693364a20ad..f186fc6966b 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -82,6 +82,9 @@ extern struct sh_cpuinfo cpu_data[]; #define current_cpu_data cpu_data[smp_processor_id()] #define raw_current_cpu_data cpu_data[raw_smp_processor_id()] +#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") +#define cpu_relax() barrier() + /* Forward decl */ struct seq_operations; diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 7b14f0cff9b..2bfb7353493 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -183,9 +183,6 @@ extern unsigned long get_wchan(struct task_struct *p); #define user_stack_pointer(regs) ((regs)->regs[15]) -#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") -#define cpu_relax() barrier() - #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ defined(CONFIG_CPU_SH4) #define PREFETCH_STRIDE L1_CACHE_BYTES diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index b0b4824dfc4..96067e9397e 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -228,7 +228,5 @@ extern unsigned long get_wchan(struct task_struct *p); #define user_stack_pointer(regs) ((regs)->sp) -#define cpu_relax() barrier() - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_PROCESSOR_64_H */ -- cgit v1.2.3 From 22f131aa8de7a534339bf7051680234462f2e877 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 27 Nov 2008 11:04:43 +0900 Subject: sh: Provide a dyn_arch_ftrace struct definition. Needed for dynamic ftrace API changes. Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 4cb5dbfc404..8fea7d8c825 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -15,15 +15,20 @@ extern void mcount(void); #define STUB_ADDR ((long)(ftrace_stub)) #define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALLER_ADDR) >> 1) -#endif + +struct dyn_arch_ftrace { + /* No extra data needed on sh */ +}; + +#endif /* CONFIG_DYNAMIC_FTRACE */ static inline unsigned long ftrace_call_adjust(unsigned long addr) { /* 'addr' is the memory table address. 
*/ return addr; } -#endif +#endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ #endif /* __ASM_SH_FTRACE_H */ -- cgit v1.2.3 From 2825999e8a9bd7ab7e25a7e7475c7cdd10371a13 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Fri, 28 Nov 2008 22:48:20 +0900 Subject: sh: Add support for SH7201 CPU subtype. This patch adds support for the SH-2A FPU-based SH7201 processor subtype. Signed-off-by: Peter Griffin Signed-off-by: Paul Mundt --- arch/sh/include/asm/bugs.h | 2 +- arch/sh/include/asm/processor.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 121b2ecddfc..4924ff6f543 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -25,7 +25,7 @@ static void __init check_bugs(void) case CPU_SH7619: *p++ = '2'; break; - case CPU_SH7203 ... CPU_MXG: + case CPU_SH7201 ... CPU_MXG: *p++ = '2'; *p++ = 'a'; break; diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index f186fc6966b..1ef4b24d761 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -18,7 +18,7 @@ enum cpu_type { CPU_SH7619, /* SH-2A types */ - CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, + CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG, /* SH-3 types */ CPU_SH7705, CPU_SH7706, CPU_SH7707, -- cgit v1.2.3 From 6aacba72dbdadc1445244e366ecf0263a160409e Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 4 Dec 2008 18:00:11 +0900 Subject: sh: add st16c2550 devices to se7343 Add 8250 platform data to set up the ST16C2550C chip on se7343. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/mach-se/mach/se7343.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h index 98458460e63..798d851343d 100644 --- a/arch/sh/include/mach-se/mach/se7343.h +++ b/arch/sh/include/mach-se/mach/se7343.h @@ -132,8 +132,10 @@ #define SE7343_FPGA_IRQ_MRSHPC3 3 #define SE7343_FPGA_IRQ_SMC 6 /* EXT_IRQ2 */ #define SE7343_FPGA_IRQ_USB 8 +#define SE7343_FPGA_IRQ_UARTA 10 +#define SE7343_FPGA_IRQ_UARTB 11 -#define SE7343_FPGA_IRQ_NR 11 +#define SE7343_FPGA_IRQ_NR 12 #define SE7343_FPGA_IRQ_BASE 120 #define MRSHPC_IRQ3 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC3) @@ -142,6 +144,8 @@ #define MRSHPC_IRQ0 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC0) #define SMC_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_SMC) #define USB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_USB) +#define UARTA_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTA) +#define UARTB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTB) /* arch/sh/boards/se/7343/irq.c */ void init_7343se_IRQ(void); -- cgit v1.2.3 From 21c601bb2ec79be5c52a99bc6f4b513aff4fa236 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 4 Dec 2008 18:00:30 +0900 Subject: sh: remove ioport cruft and smc91x from se7343 Remove out-of-date se7343 ioport code, including some old support for an unknown ne2000 PCMCIA card, CF-over-PCMCIA, and a mysterious smc91x that once must have been on a special daughterboard. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/mach-se/mach/se7343.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h index 798d851343d..749914b400f 100644 --- a/arch/sh/include/mach-se/mach/se7343.h +++ b/arch/sh/include/mach-se/mach/se7343.h @@ -118,9 +118,6 @@ #define FPGA_IN 0xb1400000 #define FPGA_OUT 0xb1400002 -#define __IO_PREFIX sh7343se -#include - #define IRQ0_IRQ 32 #define IRQ1_IRQ 33 #define IRQ4_IRQ 36 -- cgit v1.2.3 From 3e51762759db9e26c6c3e4e1010d80a50c62ca03 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 4 Dec 2008 22:45:03 +0900 Subject: sh: move the hp6xx pm code Move the not-so-generic pm code from arch/sh/kernel/pm.c to the platform directory together with the rest of the hp6xx pm code. This is done to let non-hp6xx platforms enable CONFIG_PM. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/pm.h | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 arch/sh/include/asm/pm.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/pm.h b/arch/sh/include/asm/pm.h deleted file mode 100644 index 56fdbd6b1c9..00000000000 --- a/arch/sh/include/asm/pm.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright 2006 (c) Andriy Skulysh - * - */ -#ifndef __ASM_SH_PM_H -#define __ASM_SH_PM_H - -extern u8 wakeup_start; -extern u8 wakeup_end; - -void pm_enter(void); - -#endif -- cgit v1.2.3 From 77ba93a7ac5fb0d9338bffbf97c787b8efe00806 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Dec 2008 11:25:50 +0900 Subject: sh: Fix up the SH-4A mutex fastpath semantics. This fixes up the __mutex_fastpath_xxx() routines to match the semantics noted in the comment. Previously these were looping rather than doing a single-pass, which is counter-intuitive, as the slow path takes care of the looping for us in the event of contention. 
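In C terms, the single-pass contract looks roughly like the sketch below (illustrative only; the real fastpath stays in inline assembly so that a lost movli.l/movco.l reservation also counts as failure):

	#include <asm/atomic.h>

	static inline void fastpath_lock_sketch(atomic_t *count,
						void (*fail_fn)(atomic_t *))
	{
		/* Try the decrement exactly once; on any failure, punt to
		 * fail_fn(), and only the slow path loops. */
		if (atomic_dec_return(count) != 0)
			fail_fn(count);
	}
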
Signed-off-by: Paul Mundt --- arch/sh/include/asm/mutex-llsc.h | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h index 7c75af5e734..a91990c6e8e 100644 --- a/arch/sh/include/asm/mutex-llsc.h +++ b/arch/sh/include/asm/mutex-llsc.h @@ -21,16 +21,18 @@ static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - int __res; + int __ex_flag, __res; __asm__ __volatile__ ( - "movli.l @%1, %0 \n" - "dt %0 \n" - "movco.l %0, @%1 \n" - : "=&z" (__res) + "movli.l @%2, %0 \n" + "add #-1, %0 \n" + "movco.l %0, @%2 \n" + "movt %1 \n" + : "=&z" (__res), "=&r" (__ex_flag) : "r" (&(count)->counter) : "t"); + __res |= !__ex_flag; if (unlikely(__res != 0)) fail_fn(count); } @@ -38,16 +40,18 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) static inline int __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) { - int __res; + int __ex_flag, __res; __asm__ __volatile__ ( - "movli.l @%1, %0 \n" - "dt %0 \n" - "movco.l %0, @%1 \n" - : "=&z" (__res) + "movli.l @%2, %0 \n" + "add #-1, %0 \n" + "movco.l %0, @%2 \n" + "movt %1 \n" + : "=&z" (__res), "=&r" (__ex_flag) : "r" (&(count)->counter) : "t"); + __res |= !__ex_flag; if (unlikely(__res != 0)) __res = fail_fn(count); @@ -57,18 +61,19 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) static inline void __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) { - int __res; + int __ex_flag, __res; __asm__ __volatile__ ( - "1: movli.l @%1, %0 \n\t" + "movli.l @%2, %0 \n\t" "add #1, %0 \n\t" - "movco.l %0, @%1 \n\t" - "bf 1b\n\t" - : "=&z" (__res) + "movco.l %0, @%2 \n\t" + "movt %1 \n\t" + : "=&z" (__res), "=&r" (__ex_flag) : "r" (&(count)->counter) : "t"); - if (unlikely(__res <= 0)) + __res |= !__ex_flag; + if (unlikely(__res != 0)) fail_fn(count); } -- cgit v1.2.3 From 06be3724548a443a99d703ff79f43d6f1e2975f0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Dec 2008 17:01:40 +0900 Subject: sh: Fix an off-by-1 check in __mutex_fastpath_unlock(). Signed-off-by: Paul Mundt --- arch/sh/include/asm/mutex-llsc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h index a91990c6e8e..ee839ee58ac 100644 --- a/arch/sh/include/asm/mutex-llsc.h +++ b/arch/sh/include/asm/mutex-llsc.h @@ -73,7 +73,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) : "t"); __res |= !__ex_flag; - if (unlikely(__res != 0)) + if (unlikely(__res <= 0)) fail_fn(count); } -- cgit v1.2.3 From 35724a0aed6e62bdad640e8a1b8498329708226f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 18:17:19 +0900 Subject: sh: Fix up the cpu_asid() return value on nommu. This ought to be unsigned long, rather than defaulting to int. 
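A small sketch of the effect (illustrative):

	#include <asm/mmu_context.h>

	static inline unsigned long example_context(struct mm_struct *mm, int cpu)
	{
		/* cpu_asid() now evaluates to NO_CONTEXT (0UL) on nommu,
		 * so the expression is unsigned long on both MMU and
		 * nommu configurations rather than a plain int 0. */
		return cpu_asid(cpu, mm);
	}
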
Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu_context.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 04c0c9733ad..5d9157bd474 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -22,7 +22,7 @@ #define MMU_CONTEXT_ASID_MASK 0x000000ff #define MMU_CONTEXT_VERSION_MASK 0xffffff00 #define MMU_CONTEXT_FIRST_VERSION 0x00000100 -#define NO_CONTEXT 0 +#define NO_CONTEXT 0UL /* ASID is 8-bit value, so it can't be 0x100 */ #define MMU_NO_ASID 0x100 @@ -130,7 +130,7 @@ static inline void switch_mm(struct mm_struct *prev, #define destroy_context(mm) do { } while (0) #define set_asid(asid) do { } while (0) #define get_asid() (0) -#define cpu_asid(cpu, mm) ({ (void)cpu; 0; }) +#define cpu_asid(cpu, mm) ({ (void)cpu; NO_CONTEXT; }) #define switch_and_save_asid(asid) (0) #define set_TTB(pgd) do { } while (0) #define get_TTB() (0) -- cgit v1.2.3 From f15b2dc02fef0c53aa5ffa3c4617e184f057d402 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 19:18:46 +0900 Subject: sh: Fix up syscall_get_nr() comment in syscall_32.h. Residual copy-and-paste damage, fix it up. Signed-off-by: Paul Mundt --- arch/sh/include/asm/syscall_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 54773f26cd4..05a868a71ef 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -5,7 +5,7 @@ #include #include -/* The system call number is given by the user in %g1 */ +/* The system call number is given by the user in R3 */ static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { -- cgit v1.2.3 From 94e2fb3d3e1f4cb6bad2b13c572c4c99ad734a37 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 19:46:18 +0900 Subject: sh: Provide asm/syscall.h for SH-5. This provides the asm/syscall.h implementation for sh64 parts. Signed-off-by: Paul Mundt --- arch/sh/include/asm/syscall_64.h | 76 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index bcaaa8ca4d7..e95f3ae30af 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -1,6 +1,80 @@ #ifndef __ASM_SH_SYSCALL_64_H #define __ASM_SH_SYSCALL_64_H -#include +#include +#include +#include + +/* The system call number is given by the user in R9 */ +static inline long syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + /* + * XXX: This needs some thought. On SH we don't + * save away the original R9 value anywhere. + */ +} + +static inline bool syscall_has_error(struct pt_regs *regs) +{ + return (regs->sr & 0x1) ? true : false; +} +static inline void syscall_set_error(struct pt_regs *regs) +{ + regs->sr |= 0x1; +} +static inline void syscall_clear_error(struct pt_regs *regs) +{ + regs->sr &= ~0x1; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + return syscall_has_error(regs) ? 
regs->regs[9] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[9]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + syscall_set_error(regs); + regs->regs[9] = -error; + } else { + syscall_clear_error(regs); + regs->regs[9] = val; + } +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + unsigned long *args) +{ + BUG_ON(i + n > 6); + memcpy(args, ®s->reg[2 + i], n * sizeof(args[0])); +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned int i, unsigned int n, + const unsigned long *args) +{ + BUG_ON(i + n > 6); + memcpy(®s->reg[2 + i], args, n * sizeof(args[0])); +} #endif /* __ASM_SH_SYSCALL_64_H */ -- cgit v1.2.3 From dd76279b47dce2c0bd7c54997938ec4cb9f16884 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 20:14:15 +0900 Subject: sh: Provide linux/regset.h interface for SH-5. Plugs in general and FPU regsets. Signed-off-by: Paul Mundt --- arch/sh/include/asm/elf.h | 2 -- arch/sh/include/asm/ptrace.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036a1bd..b809f22ea63 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -108,13 +108,11 @@ typedef struct user_fpu_struct elf_fpregset_t; #define elf_check_fdpic(x) ((x)->e_flags & EF_SH_FDPIC) #define elf_check_const_displacement(x) ((x)->e_flags & EF_SH_PIC) -#ifdef CONFIG_SUPERH32 /* * Enable dump using regset. * This covers all of general/DSP/FPU regs. */ #define CORE_DUMP_USE_REGSET -#endif #define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index 3ad18e91bca..12912ab80c1 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h @@ -86,6 +86,7 @@ struct pt_dspregs { unsigned long re; unsigned long mod; }; +#endif #define PTRACE_GETREGS 12 /* General registers */ #define PTRACE_SETREGS 13 @@ -100,7 +101,6 @@ struct pt_dspregs { #define PTRACE_GETDSPREGS 55 /* DSP registers */ #define PTRACE_SETDSPREGS 56 -#endif #ifdef __KERNEL__ #include -- cgit v1.2.3 From d7b01f78a3ae6a3cc21a16a1a3d377adc2227537 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Dec 2008 20:17:15 +0900 Subject: sh: Enable HAVE_ARCH_TRACEHOOK for all SH, now that SH-5 supports it too. 
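With asm/syscall.h now available on both sh32 and sh64, generic tracehook consumers can inspect syscalls uniformly; a sketch (the tracer function is hypothetical):

	#include <linux/sched.h>
	#include <asm/syscall.h>

	static void example_trace_entry(struct task_struct *task,
					struct pt_regs *regs)
	{
		unsigned long args[6];
		long nr = syscall_get_nr(task, regs);

		syscall_get_arguments(task, regs, 0, 6, args);
		/* nr and args[] now describe the syscall being entered */
	}
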
Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_64.h | 2 +- arch/sh/include/asm/syscall_64.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 96067e9397e..803177fcf08 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -226,7 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) ((tsk)->thread.pc) #define KSTK_ESP(tsk) ((tsk)->thread.sp) -#define user_stack_pointer(regs) ((regs)->sp) +#define user_stack_pointer(regs) ((regs)->regs[15]) #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_PROCESSOR_64_H */ diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h index e95f3ae30af..e1143b9784d 100644 --- a/arch/sh/include/asm/syscall_64.h +++ b/arch/sh/include/asm/syscall_64.h @@ -65,7 +65,7 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { BUG_ON(i + n > 6); - memcpy(args, ®s->reg[2 + i], n * sizeof(args[0])); + memcpy(args, ®s->regs[2 + i], n * sizeof(args[0])); } static inline void syscall_set_arguments(struct task_struct *task, @@ -74,7 +74,7 @@ static inline void syscall_set_arguments(struct task_struct *task, const unsigned long *args) { BUG_ON(i + n > 6); - memcpy(®s->reg[2 + i], args, n * sizeof(args[0])); + memcpy(®s->regs[2 + i], args, n * sizeof(args[0])); } #endif /* __ASM_SH_SYSCALL_64_H */ -- cgit v1.2.3 From ab6e570ba33dbee18c2520d386e0f367a9b573c3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 11 Dec 2008 18:46:46 +0900 Subject: sh: Generic kgdb stub support. This migrates from the old bitrotted kgdb stub implementation and moves to the generic stub. In the process support for SH-2/SH-2A is also added, which the old stub never provided. Signed-off-by: Paul Mundt --- arch/sh/include/asm/kgdb.h | 66 +++++++++++++------------------------------- arch/sh/include/asm/system.h | 2 ++ 2 files changed, 21 insertions(+), 47 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 24e42078f36..72704ed725e 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h @@ -1,21 +1,7 @@ -/* - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * Based on original code by Glenn Engel, Jim Kingdon, - * David Grothe , Tigran Aivazian, and - * Amit S. Kale - * - * Super-H port based on sh-stub.c (Ben Lee and Steve Chamberlain) by - * Henry Bell - * - * Header file for low-level support for remote debug using GDB. 
- * - */ - -#ifndef __KGDB_H -#define __KGDB_H +#ifndef __ASM_SH_KGDB_H +#define __ASM_SH_KGDB_H +#include #include /* Same as pt_regs but has vbr in place of syscall_nr */ @@ -30,40 +16,26 @@ struct kgdb_regs { unsigned long vbr; }; -/* State info */ -extern char kgdb_in_gdb_mode; -extern int kgdb_nofault; /* Ignore bus errors (in gdb mem access) */ -extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ +enum regnames { + GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7, + GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15, -/* SCI */ -extern int kgdb_portnum; -extern int kgdb_baud; -extern char kgdb_parity; -extern char kgdb_bits; + GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR, +}; -/* Init and interface stuff */ -extern int kgdb_init(void); -extern int (*kgdb_getchar)(void); -extern void (*kgdb_putchar)(int); +#define NUMREGBYTES ((GDB_VBR + 1) * 4) -/* Trap functions */ -typedef void (kgdb_debug_hook_t)(struct pt_regs *regs); -typedef void (kgdb_bus_error_hook_t)(void); -extern kgdb_debug_hook_t *kgdb_debug_hook; -extern kgdb_bus_error_hook_t *kgdb_bus_err_hook; +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__ ("trapa #0x3c\n"); +} -/* Console */ -struct console; -void kgdb_console_write(struct console *co, const char *s, unsigned count); -extern int kgdb_console_setup(struct console *, char *); +/* State info */ +extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ -/* Prototypes for jmp fns */ -#define _JBLEN 9 -typedef int jmp_buf[_JBLEN]; -extern void longjmp(jmp_buf __jmpb, int __retval); -extern int setjmp(jmp_buf __jmpb); +#define BUFMAX 2048 -/* Forced breakpoint */ -#define breakpoint() __asm__ __volatile__("trapa #0x3c") +#define CACHE_FLUSH_IS_SAFE 1 +#define BREAK_INSTR_SIZE 2 -#endif +#endif /* __ASM_SH_KGDB_H */ diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 6160fe44516..c9ec6af8e74 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -175,6 +175,8 @@ asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs) BUILD_TRAP_HANDLER(address_error); BUILD_TRAP_HANDLER(debug); BUILD_TRAP_HANDLER(bug); +BUILD_TRAP_HANDLER(breakpoint); +BUILD_TRAP_HANDLER(singlestep); BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); -- cgit v1.2.3 From 4466b20cfcfa718ff515b9e3886749cc025e2005 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 12 Dec 2008 16:34:44 +0900 Subject: sh: Add SH-5 optimized memcpy()/memset()/strcpy()/strlen(). Adopted from the uClibc optimized string versions. Signed-off-by: Paul Mundt --- arch/sh/include/asm/string_64.h | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h index aa1fef229c7..74200717262 100644 --- a/arch/sh/include/asm/string_64.h +++ b/arch/sh/include/asm/string_64.h @@ -1,17 +1,20 @@ #ifndef __ASM_SH_STRING_64_H #define __ASM_SH_STRING_64_H -/* - * include/asm-sh/string_64.h - * - * Copyright (C) 2000, 2001 Paolo Alberelli - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ +#ifdef __KERNEL__ + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, size_t __count); #define __HAVE_ARCH_MEMCPY extern void *memcpy(void *dest, const void *src, size_t count); +#define __HAVE_ARCH_STRLEN +extern size_t strlen(const char *); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *__dest, const char *__src); + +#endif /* __KERNEL__ */ + #endif /* __ASM_SH_STRING_64_H */ -- cgit v1.2.3 From ca0c14e447a399eb90a1c9a4357560c2a29ef499 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 11:47:59 +0900 Subject: sh: Kill off sh_bios_in_gdb_mode(). With the reworked kgdb support, we always detach and reinitialize the stub. This was mostly a feature for handoffs between sh-ipl+g and the kgdb stub, but virtually no sh-ipl+g versions ever had this working right in the first place. Given that the sh-ipl+g stubs in general use today don't even support the GDB stub, and we have already killed off the special casing in the sh-sci serial driver, kill off this now unused symbol too. Signed-off-by: Paul Mundt --- arch/sh/include/asm/sh_bios.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h index 0ca261956e3..d9c96d7cf6c 100644 --- a/arch/sh/include/asm/sh_bios.h +++ b/arch/sh/include/asm/sh_bios.h @@ -10,7 +10,6 @@ extern void sh_bios_console_write(const char *buf, unsigned int len); extern void sh_bios_char_out(char ch); -extern int sh_bios_in_gdb_mode(void); extern void sh_bios_gdb_detach(void); extern void sh_bios_get_node_addr(unsigned char *node_addr); -- cgit v1.2.3 From 7b80fb32b39a51ce3e1afa051f5a616eb8ecbed3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 12:19:30 +0900 Subject: sh: Kill off mv_heartbeat() from the machvec. Nothing is using this any more, so get rid of it before anyone gets the bright idea to start using it again. Signed-off-by: Paul Mundt --- arch/sh/include/asm/machvec.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index f1bae02ef7b..e14e09b3d6d 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -47,8 +47,6 @@ struct sh_machine_vector { void (*mv_init_irq)(void); void (*mv_init_pci)(void); - void (*mv_heartbeat)(void); - void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); void (*mv_ioport_unmap)(void __iomem *); }; -- cgit v1.2.3 From 2125a46083dc5a9aa321c243e322638a9338cd11 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 12:31:32 +0900 Subject: sh: Kill off dead mv_init_pci() from machvec. Signed-off-by: Paul Mundt --- arch/sh/include/asm/machvec.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index e14e09b3d6d..eec0d22750b 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -45,7 +45,6 @@ struct sh_machine_vector { int (*mv_irq_demux)(int irq); void (*mv_init_irq)(void); - void (*mv_init_pci)(void); void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size); void (*mv_ioport_unmap)(void __iomem *); -- cgit v1.2.3 From 866ef8f48f2272ce8d84156c91964d730666ab33 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 13:57:15 +0900 Subject: sh: mach-edosk7705: Fix up edosk7705 so it all builds again. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/machvec.h | 2 -- arch/sh/include/mach-common/mach/edosk7705.h | 31 ++++------------------------ 2 files changed, 4 insertions(+), 29 deletions(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index eec0d22750b..64b1c16a0f0 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -14,8 +14,6 @@ #include #include -struct device; - struct sh_machine_vector { void (*mv_setup)(char **cmdline_p); const char *mv_name; diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h index 5bdc9d9be3d..efc43b32346 100644 --- a/arch/sh/include/mach-common/mach/edosk7705.h +++ b/arch/sh/include/mach-common/mach/edosk7705.h @@ -1,30 +1,7 @@ -/* - * include/asm-sh/edosk7705.h - * - * Modified version of io_se.h for the EDOSK7705 specific functions. - * - * May be copied or modified under the terms of the GNU General Public - * License. See linux/COPYING for more information. - * - * IO functions for an Hitachi EDOSK7705 development board - */ - -#ifndef __ASM_SH_EDOSK7705_IO_H -#define __ASM_SH_EDOSK7705_IO_H +#ifndef __ASM_SH_EDOSK7705_H +#define __ASM_SH_EDOSK7705_H +#define __IO_PREFIX sh_edosk7705 #include -extern unsigned char sh_edosk7705_inb(unsigned long port); -extern unsigned int sh_edosk7705_inl(unsigned long port); - -extern void sh_edosk7705_outb(unsigned char value, unsigned long port); -extern void sh_edosk7705_outl(unsigned int value, unsigned long port); - -extern void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count); -extern void sh_edosk7705_insl(unsigned long port, void *addr, unsigned long count); -extern void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count); -extern void sh_edosk7705_outsl(unsigned long port, const void *addr, unsigned long count); - -extern unsigned long sh_edosk7705_isa_port2addr(unsigned long offset); - -#endif /* __ASM_SH_EDOSK7705_IO_H */ +#endif /* __ASM_SH_EDOSK7705_H */ -- cgit v1.2.3 From 073da9c0de401e8683b6bc76c008a7e0850045d5 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 14:41:54 +0900 Subject: sh: Kill off cf-enabler with extreme prejudice. Now that the rest of the boards that were using cf-enabler "generically" have switched to setting up their mappings on their own, only the mach-se boards were left using it. All of the cf-enabler using mach-se boards use a special initialization of the MRSHPC windows rather than going through the special PTE as other SH-4 platforms do. This consolidates the MRSHPC setup logic, hooks it up on the boards that care, and gets rid of any and all remaining references to cf-enabler. This has been long overdue, as cf-enabler has been the bane of arch/sh/kernel for the last 7 years. Good riddance. 
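A board now just calls the helper from its setup code; a minimal sketch (the board function name is illustrative):

	#include <mach-se/mach/mrshpc.h>

	static void __init example_se_setup(char **cmdline_p)
	{
		/* Program the MRSHPC windows, if the controller is present. */
		mrshpc_setup_windows();
	}
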
Signed-off-by: Paul Mundt --- arch/sh/include/mach-se/mach/mrshpc.h | 52 +++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 arch/sh/include/mach-se/mach/mrshpc.h (limited to 'arch/sh/include') diff --git a/arch/sh/include/mach-se/mach/mrshpc.h b/arch/sh/include/mach-se/mach/mrshpc.h new file mode 100644 index 00000000000..95c8b825401 --- /dev/null +++ b/arch/sh/include/mach-se/mach/mrshpc.h @@ -0,0 +1,52 @@ +#ifndef __MACH_SE_MRSHPC_H +#define __MACH_SE_MRSHPC_H + +#include + +static void __init mrshpc_setup_windows(void) +{ + if ((__raw_readw(MRSHPC_CSR) & 0x000c) != 0) + return; /* Not detected */ + + if ((__raw_readw(MRSHPC_CSR) & 0x0080) == 0) { + __raw_writew(0x0674, MRSHPC_CPWCR); /* Card Vcc is 3.3v? */ + } else { + __raw_writew(0x0678, MRSHPC_CPWCR); /* Card Vcc is 5V */ + } + + /* + * PC-Card window open + * flag == COMMON/ATTRIBUTE/IO + */ + /* common window open */ + __raw_writew(0x8a84, MRSHPC_MW0CR1); + if((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) + /* common mode & bus width 16bit SWAP = 1*/ + __raw_writew(0x0b00, MRSHPC_MW0CR2); + else + /* common mode & bus width 16bit SWAP = 0*/ + __raw_writew(0x0300, MRSHPC_MW0CR2); + + /* attribute window open */ + __raw_writew(0x8a85, MRSHPC_MW1CR1); + if ((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) + /* attribute mode & bus width 16bit SWAP = 1*/ + __raw_writew(0x0a00, MRSHPC_MW1CR2); + else + /* attribute mode & bus width 16bit SWAP = 0*/ + __raw_writew(0x0200, MRSHPC_MW1CR2); + + /* I/O window open */ + __raw_writew(0x8a86, MRSHPC_IOWCR1); + __raw_writew(0x0008, MRSHPC_CDCR); /* I/O card mode */ + if ((__raw_readw(MRSHPC_CSR) & 0x4000) != 0) + __raw_writew(0x0a00, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 1*/ + else + __raw_writew(0x0200, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 0*/ + + __raw_writew(0x2000, MRSHPC_ICR); + __raw_writeb(0x00, PA_MRSHPC_MW2 + 0x206); + __raw_writeb(0x42, PA_MRSHPC_MW2 + 0x200); +} + +#endif /* __MACH_SE_MRSHPC_H */ -- cgit v1.2.3 From 0146d7875976795fe364b4a3da629975ebd37671 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 14:58:04 +0900 Subject: sh: mrshpc_setup_windows() needs to be inline. While no one should be including this file multiple times, flag it inline anyways just in case. Signed-off-by: Paul Mundt --- arch/sh/include/mach-se/mach/mrshpc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/mach-se/mach/mrshpc.h b/arch/sh/include/mach-se/mach/mrshpc.h index 95c8b825401..56287ee8563 100644 --- a/arch/sh/include/mach-se/mach/mrshpc.h +++ b/arch/sh/include/mach-se/mach/mrshpc.h @@ -3,7 +3,7 @@ #include -static void __init mrshpc_setup_windows(void) +static inline void __init mrshpc_setup_windows(void) { if ((__raw_readw(MRSHPC_CSR) & 0x000c) != 0) return; /* Not detected */ -- cgit v1.2.3 From 5d2685d0b3edc51ecc92604d5b7f5ca9b29b90bb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 17 Dec 2008 15:56:06 +0900 Subject: sh: Conditionalize the code dumper on CONFIG_DUMP_CODE. We don't really want this enabled by default, but it is still quite useful for debugging. So, make it conditional and leave it off by default. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/processor_32.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index 2bfb7353493..d79063c5eb9 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -175,7 +175,15 @@ static __inline__ void enable_fpu(void) void show_trace(struct task_struct *tsk, unsigned long *sp, struct pt_regs *regs); + +#ifdef CONFIG_DUMP_CODE void show_code(struct pt_regs *regs); +#else +static inline void show_code(struct pt_regs *regs) +{ +} +#endif + extern unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) -- cgit v1.2.3 From fc5243d98ac2575ad14a974b3c097e9ba874c03d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 25 Dec 2008 13:38:35 +0100 Subject: [S390] arch_setup_additional_pages arguments arch_setup_additional_pages currently gets two arguments: the binary format description and an indication of whether the process uses an executable stack. The second argument is not used by anybody; it could be removed without replacement. What actually does make sense is to pass an indication of whether the process uses the ELF interpreter. The glibc code will not use anything from the vdso if the process does not use the dynamic linker, so for statically linked binaries the architecture backend can choose not to map the vdso. Acked-by: Ingo Molnar Signed-off-by: Martin Schwidefsky --- arch/sh/include/asm/elf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/include') diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036a1bd..9381397ebeb 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -204,7 +204,7 @@ do { \ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); extern unsigned int vdso_enabled; extern void __kernel_vsyscall; -- cgit v1.2.3
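For reference, a sketch of what the new argument enables in an arch backend (illustrative; this is not the actual sh or s390 implementation, and map_vdso() is a hypothetical helper):

	#include <linux/binfmts.h>

	int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
	{
		/* Statically linked binaries never enter the dynamic linker,
		 * and glibc will not touch the vDSO in that case, so the
		 * mapping can be skipped entirely. */
		if (!uses_interp)
			return 0;

		return map_vdso(bprm);
	}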