path: root/arch/sh/kernel/traps.c
Diffstat (limited to 'arch/sh/kernel/traps.c')
-rw-r--r--  arch/sh/kernel/traps.c  200
1 files changed, 147 insertions, 53 deletions
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 53dfa55f315..3762d9dc204 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -18,13 +18,14 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
+#include <linux/debug_locks.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs) \
-{ \
+#define CHK_REMOTE_DEBUG(regs) \
+{ \
if (kgdb_debug_hook && !user_mode(regs))\
(*kgdb_debug_hook)(regs); \
}
@@ -33,8 +34,13 @@
#endif
#ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST 4
-#define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_RESERVED_INST 4
+# define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_ADDRESS_ERROR 9
+# ifdef CONFIG_CPU_SH2A
+# define TRAP_DIVZERO_ERROR 17
+# define TRAP_DIVOVF_ERROR 18
+# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
@@ -88,7 +94,7 @@ void die(const char * str, struct pt_regs * regs, long err)
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
+ (unsigned long)task_stack_page(current));
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
@@ -102,8 +108,6 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs,
die(str, regs, err);
}
-static int handle_unaligned_notify_count = 10;
-
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -198,7 +202,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
if (copy_to_user(dst,src,4))
goto fetch_fault;
ret = 0;
- break;
+ break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
@@ -222,7 +226,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
if (copy_from_user(dst,src,4))
goto fetch_fault;
ret = 0;
- break;
+ break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
src = (unsigned char*) *rm;
@@ -230,7 +234,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
-
+
#ifdef __LITTLE_ENDIAN__
if (copy_from_user(dst, src, count))
goto fetch_fault;
@@ -241,7 +245,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
}
#else
dst += 4-count;
-
+
if (copy_from_user(dst, src, count))
goto fetch_fault;
@@ -320,7 +324,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
return -EFAULT;
/* kernel */
- die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+ die("delay-slot-insn faulting in handle_unaligned_delayslot",
+ regs, 0);
}
return handle_unaligned_ins(instruction,regs);
@@ -342,6 +347,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
u_int rm;
@@ -354,7 +366,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
if (user_mode(regs) && handle_unaligned_notify_count>0) {
handle_unaligned_notify_count--;
- printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ printk(KERN_NOTICE "Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
current->comm,current->pid,(u16*)regs->pc,instruction);
}
@@ -478,32 +491,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
regs->pc += 2;
return ret;
}
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x) \
+ __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
/*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ * - instruction address error:
+ * misaligned PC
+ * PC >= 0x80000000 in user mode
+ * - data address error (read and write)
+ * misaligned data access
+ * access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address errors
+ * and data address errors caused by read accesses.
*/
-asmlinkage void do_address_error(struct pt_regs *regs,
+asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
- unsigned long error_code;
+ unsigned long error_code = 0;
mm_segment_t oldfs;
+ siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
u16 instruction;
int tmp;
+#endif
- asm volatile("stc r2_bank,%0": "=r" (error_code));
+ /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+ lookup_exception_vector(error_code);
+#endif
oldfs = get_fs();
if (user_mode(regs)) {
+ int si_code = BUS_ADRERR;
+
local_irq_enable();
- current->thread.error_code = error_code;
- current->thread.trap_no = (writeaccess) ? 8 : 7;
/* bad PC is not something we can fix */
- if (regs->pc & 1)
+ if (regs->pc & 1) {
+ si_code = BUS_ADRALN;
goto uspace_segv;
+ }
+#ifndef CONFIG_CPU_SH2A
set_fs(USER_DS);
if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
/* Argh. Fault on the instruction itself.
@@ -518,14 +557,23 @@ asmlinkage void do_address_error(struct pt_regs *regs,
if (tmp==0)
return; /* sorted */
+#endif
- uspace_segv:
- printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
- force_sig(SIGSEGV, current);
+uspace_segv:
+ printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+ "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+ regs->pr);
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void *) address;
+ force_sig_info(SIGBUS, &info, current);
} else {
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
+#ifndef CONFIG_CPU_SH2A
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
/* Argh. Fault on the instruction itself.
@@ -537,6 +585,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
handle_unaligned_access(instruction, regs);
set_fs(oldfs);
+#else
+ printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+ "access\n", current->comm);
+
+ force_sig(SIGSEGV, current);
+#endif
}
}
@@ -548,7 +602,7 @@ int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst;
- /*
+ /*
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
@@ -569,27 +623,49 @@ int is_dsp_inst(struct pt_regs *regs)
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs)
+{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ siginfo_t info;
+
+ switch (r4) {
+ case TRAP_DIVZERO_ERROR:
+ info.si_code = FPE_INTDIV;
+ break;
+ case TRAP_DIVOVF_ERROR:
+ info.si_code = FPE_INTOVF;
+ break;
+ }
+
+ force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
/* arch/sh/kernel/cpu/sh4/fpu.c */
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7, struct pt_regs regs);
+ unsigned long r6, unsigned long r7, struct pt_regs __regs);
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst;
+ unsigned short inst = 0;
int err;
- get_user(inst, (unsigned short*)regs.pc);
+ get_user(inst, (unsigned short*)regs->pc);
- err = do_fpu_inst(inst, &regs);
+ err = do_fpu_inst(inst, regs);
if (!err) {
- regs.pc += 2;
+ regs->pc += 2;
return;
}
/* not a FPU inst. */
@@ -597,20 +673,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
- if (is_dsp_inst(&regs)) {
+ if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
- regs.sr |= SR_DSP;
+ regs->sr |= SR_DSP;
return;
}
#endif
- asm volatile("stc r2_bank, %0": "=r" (error_code));
+ lookup_exception_vector(error_code);
+
local_irq_enable();
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = TRAP_RESERVED_INST;
- CHK_REMOTE_DEBUG(&regs);
+ CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("reserved instruction", &regs, error_code);
+ die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
@@ -658,39 +733,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst;
+ unsigned short inst = 0;
- get_user(inst, (unsigned short *)regs.pc + 1);
- if (!do_fpu_inst(inst, &regs)) {
- get_user(inst, (unsigned short *)regs.pc);
- if (!emulate_branch(inst, &regs))
+ get_user(inst, (unsigned short *)regs->pc + 1);
+ if (!do_fpu_inst(inst, regs)) {
+ get_user(inst, (unsigned short *)regs->pc);
+ if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
- asm volatile("stc r2_bank, %0": "=r" (error_code));
+ lookup_exception_vector(error_code);
+
local_irq_enable();
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = TRAP_RESERVED_INST;
- CHK_REMOTE_DEBUG(&regs);
+ CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("illegal slot instruction", &regs, error_code);
+ die_if_no_fixup("illegal slot instruction", regs, error_code);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
- struct pt_regs regs)
+ struct pt_regs __regs)
{
+ struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
- asm volatile("stc r2_bank, %0" : "=r" (ex));
- die_if_kernel("exception", &regs, ex);
+
+ lookup_exception_vector(ex);
+ die_if_kernel("exception", regs, ex);
}
#if defined(CONFIG_SH_STANDARD_BIOS)
@@ -735,12 +812,16 @@ void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
-
+
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,7 +840,15 @@ void __init trap_init(void)
set_exception_table_evt(0x800, do_fpu_state_restore);
set_exception_table_evt(0x820, do_fpu_state_restore);
#endif
-
+
+#ifdef CONFIG_CPU_SH2
+ set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+ set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+ set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
/* Setup VBR for boot cpu */
per_cpu_trap_init();
}
@@ -784,6 +873,11 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
}
printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)