Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/Kconfig.debug              |   8
-rw-r--r--  arch/sh/Makefile                   |   4
-rw-r--r--  arch/sh/include/asm/dwarf.h        | 407
-rw-r--r--  arch/sh/include/asm/sections.h     |   1
-rw-r--r--  arch/sh/include/asm/vmlinux.lds.h  |  17
-rw-r--r--  arch/sh/kernel/Makefile_32         |   1
-rw-r--r--  arch/sh/kernel/Makefile_64         |   1
-rw-r--r--  arch/sh/kernel/dwarf.c             | 876
-rw-r--r--  arch/sh/kernel/irq.c               |   4
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S       |   4
10 files changed, 1322 insertions(+), 1 deletion(-)
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 763b792b161..741d20fab2e 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -110,6 +110,14 @@ config DUMP_CODE
Those looking for more verbose debugging output should say Y.
+config DWARF_UNWINDER
+ bool "Enable the DWARF unwinder for stacktraces"
+ select FRAME_POINTER
+ default n
+ help
+ Enabling this option will make stacktraces more accurate, at
+ the cost of an increase in overall kernel size.
+
config SH_NO_BSS_INIT
bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)"
depends on DEBUG_KERNEL
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index b6ff337fd85..e26421bf997 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -191,6 +191,10 @@ ifeq ($(CONFIG_MCOUNT),y)
KBUILD_CFLAGS += -pg
endif
+ifeq ($(CONFIG_DWARF_UNWINDER),y)
+ KBUILD_CFLAGS += -fasynchronous-unwind-tables
+endif
+
libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
new file mode 100644
index 00000000000..23eba12880b
--- /dev/null
+++ b/arch/sh/include/asm/dwarf.h
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#ifndef __ASM_SH_DWARF_H
+#define __ASM_SH_DWARF_H
+
+#ifdef CONFIG_DWARF_UNWINDER
+
+/*
+ * DWARF expression operations
+ */
+#define DW_OP_addr 0x03
+#define DW_OP_deref 0x06
+#define DW_OP_const1u 0x08
+#define DW_OP_const1s 0x09
+#define DW_OP_const2u 0x0a
+#define DW_OP_const2s 0x0b
+#define DW_OP_const4u 0x0c
+#define DW_OP_const4s 0x0d
+#define DW_OP_const8u 0x0e
+#define DW_OP_const8s 0x0f
+#define DW_OP_constu 0x10
+#define DW_OP_consts 0x11
+#define DW_OP_dup 0x12
+#define DW_OP_drop 0x13
+#define DW_OP_over 0x14
+#define DW_OP_pick 0x15
+#define DW_OP_swap 0x16
+#define DW_OP_rot 0x17
+#define DW_OP_xderef 0x18
+#define DW_OP_abs 0x19
+#define DW_OP_and 0x1a
+#define DW_OP_div 0x1b
+#define DW_OP_minus 0x1c
+#define DW_OP_mod 0x1d
+#define DW_OP_mul 0x1e
+#define DW_OP_neg 0x1f
+#define DW_OP_not 0x20
+#define DW_OP_or 0x21
+#define DW_OP_plus 0x22
+#define DW_OP_plus_uconst 0x23
+#define DW_OP_shl 0x24
+#define DW_OP_shr 0x25
+#define DW_OP_shra 0x26
+#define DW_OP_xor 0x27
+#define DW_OP_skip 0x2f
+#define DW_OP_bra 0x28
+#define DW_OP_eq 0x29
+#define DW_OP_ge 0x2a
+#define DW_OP_gt 0x2b
+#define DW_OP_le 0x2c
+#define DW_OP_lt 0x2d
+#define DW_OP_ne 0x2e
+#define DW_OP_lit0 0x30
+#define DW_OP_lit1 0x31
+#define DW_OP_lit2 0x32
+#define DW_OP_lit3 0x33
+#define DW_OP_lit4 0x34
+#define DW_OP_lit5 0x35
+#define DW_OP_lit6 0x36
+#define DW_OP_lit7 0x37
+#define DW_OP_lit8 0x38
+#define DW_OP_lit9 0x39
+#define DW_OP_lit10 0x3a
+#define DW_OP_lit11 0x3b
+#define DW_OP_lit12 0x3c
+#define DW_OP_lit13 0x3d
+#define DW_OP_lit14 0x3e
+#define DW_OP_lit15 0x3f
+#define DW_OP_lit16 0x40
+#define DW_OP_lit17 0x41
+#define DW_OP_lit18 0x42
+#define DW_OP_lit19 0x43
+#define DW_OP_lit20 0x44
+#define DW_OP_lit21 0x45
+#define DW_OP_lit22 0x46
+#define DW_OP_lit23 0x47
+#define DW_OP_lit24 0x48
+#define DW_OP_lit25 0x49
+#define DW_OP_lit26 0x4a
+#define DW_OP_lit27 0x4b
+#define DW_OP_lit28 0x4c
+#define DW_OP_lit29 0x4d
+#define DW_OP_lit30 0x4e
+#define DW_OP_lit31 0x4f
+#define DW_OP_reg0 0x50
+#define DW_OP_reg1 0x51
+#define DW_OP_reg2 0x52
+#define DW_OP_reg3 0x53
+#define DW_OP_reg4 0x54
+#define DW_OP_reg5 0x55
+#define DW_OP_reg6 0x56
+#define DW_OP_reg7 0x57
+#define DW_OP_reg8 0x58
+#define DW_OP_reg9 0x59
+#define DW_OP_reg10 0x5a
+#define DW_OP_reg11 0x5b
+#define DW_OP_reg12 0x5c
+#define DW_OP_reg13 0x5d
+#define DW_OP_reg14 0x5e
+#define DW_OP_reg15 0x5f
+#define DW_OP_reg16 0x60
+#define DW_OP_reg17 0x61
+#define DW_OP_reg18 0x62
+#define DW_OP_reg19 0x63
+#define DW_OP_reg20 0x64
+#define DW_OP_reg21 0x65
+#define DW_OP_reg22 0x66
+#define DW_OP_reg23 0x67
+#define DW_OP_reg24 0x68
+#define DW_OP_reg25 0x69
+#define DW_OP_reg26 0x6a
+#define DW_OP_reg27 0x6b
+#define DW_OP_reg28 0x6c
+#define DW_OP_reg29 0x6d
+#define DW_OP_reg30 0x6e
+#define DW_OP_reg31 0x6f
+#define DW_OP_breg0 0x70
+#define DW_OP_breg1 0x71
+#define DW_OP_breg2 0x72
+#define DW_OP_breg3 0x73
+#define DW_OP_breg4 0x74
+#define DW_OP_breg5 0x75
+#define DW_OP_breg6 0x76
+#define DW_OP_breg7 0x77
+#define DW_OP_breg8 0x78
+#define DW_OP_breg9 0x79
+#define DW_OP_breg10 0x7a
+#define DW_OP_breg11 0x7b
+#define DW_OP_breg12 0x7c
+#define DW_OP_breg13 0x7d
+#define DW_OP_breg14 0x7e
+#define DW_OP_breg15 0x7f
+#define DW_OP_breg16 0x80
+#define DW_OP_breg17 0x81
+#define DW_OP_breg18 0x82
+#define DW_OP_breg19 0x83
+#define DW_OP_breg20 0x84
+#define DW_OP_breg21 0x85
+#define DW_OP_breg22 0x86
+#define DW_OP_breg23 0x87
+#define DW_OP_breg24 0x88
+#define DW_OP_breg25 0x89
+#define DW_OP_breg26 0x8a
+#define DW_OP_breg27 0x8b
+#define DW_OP_breg28 0x8c
+#define DW_OP_breg29 0x8d
+#define DW_OP_breg30 0x8e
+#define DW_OP_breg31 0x8f
+#define DW_OP_regx 0x90
+#define DW_OP_fbreg 0x91
+#define DW_OP_bregx 0x92
+#define DW_OP_piece 0x93
+#define DW_OP_deref_size 0x94
+#define DW_OP_xderef_size 0x95
+#define DW_OP_nop 0x96
+#define DW_OP_push_object_address 0x97
+#define DW_OP_call2 0x98
+#define DW_OP_call4 0x99
+#define DW_OP_call_ref 0x9a
+#define DW_OP_form_tls_address 0x9b
+#define DW_OP_call_frame_cfa 0x9c
+#define DW_OP_bit_piece 0x9d
+#define DW_OP_lo_user 0xe0
+#define DW_OP_hi_user 0xff
+
+/*
+ * Addresses used in FDE entries in the .eh_frame section may be encoded
+ * using one of the following encodings.
+ */
+#define DW_EH_PE_absptr 0x00
+#define DW_EH_PE_omit 0xff
+#define DW_EH_PE_uleb128 0x01
+#define DW_EH_PE_udata2 0x02
+#define DW_EH_PE_udata4 0x03
+#define DW_EH_PE_udata8 0x04
+#define DW_EH_PE_sleb128 0x09
+#define DW_EH_PE_sdata2 0x0a
+#define DW_EH_PE_sdata4 0x0b
+#define DW_EH_PE_sdata8 0x0c
+#define DW_EH_PE_signed 0x08
+
+#define DW_EH_PE_pcrel 0x10
+
+/*
+ * The architecture-specific register number that contains the return
+ * address in the .debug_frame table.
+ */
+#define DWARF_ARCH_RA_REG 17
+
+/*
+ * At what offset into dwarf_unwind_stack() is DWARF_ARCH_RA_REG set up?
+ */
+#define DWARF_ARCH_UNWIND_OFFSET 0x20
+
+#ifndef __ASSEMBLY__
+/*
+ * Read either the frame pointer (r14) or the stack pointer (r15).
+ * NOTE: this MUST be inlined.
+ */
+static __always_inline unsigned long dwarf_read_arch_reg(unsigned int reg)
+{
+ unsigned long value;
+
+ switch (reg) {
+ case 14:
+ __asm__ __volatile__("mov r14, %0\n" : "=r" (value));
+ break;
+ case 15:
+ __asm__ __volatile__("mov r15, %0\n" : "=r" (value));
+ break;
+ default:
+ BUG();
+ }
+
+ return value;
+}
+
+/**
+ * dwarf_cie - Common Information Entry
+ */
+struct dwarf_cie {
+ unsigned long length;
+ unsigned long cie_id;
+ unsigned char version;
+ const char *augmentation;
+ unsigned int code_alignment_factor;
+ int data_alignment_factor;
+
+ /* Which column in the rule table represents the function's return address. */
+ unsigned int return_address_reg;
+
+ unsigned char *initial_instructions;
+ unsigned char *instructions_end;
+
+ unsigned char encoding;
+
+ unsigned long cie_pointer;
+
+ struct list_head link;
+
+ unsigned long flags;
+#define DWARF_CIE_Z_AUGMENTATION (1 << 0)
+};
+
+/**
+ * dwarf_fde - Frame Description Entry
+ */
+struct dwarf_fde {
+ unsigned long length;
+ unsigned long cie_pointer;
+ struct dwarf_cie *cie;
+ unsigned long initial_location;
+ unsigned long address_range;
+ unsigned char *instructions;
+ unsigned char *end;
+ struct list_head link;
+};
+
+/**
+ * dwarf_frame - DWARF information for a frame in the call stack
+ */
+struct dwarf_frame {
+ struct dwarf_frame *prev, *next;
+
+ unsigned long pc;
+
+ struct dwarf_reg *regs;
+ unsigned int num_regs; /* how many regs are allocated? */
+
+ unsigned int depth; /* what level are we in the callstack? */
+
+ unsigned long cfa;
+
+ /* Valid when DWARF_FRAME_CFA_REG_OFFSET is set in flags */
+ unsigned int cfa_register;
+ unsigned int cfa_offset;
+
+ /* Valid when DWARF_FRAME_CFA_REG_EXP is set in flags */
+ unsigned char *cfa_expr;
+ unsigned int cfa_expr_len;
+
+ unsigned long flags;
+#define DWARF_FRAME_CFA_REG_OFFSET (1 << 0)
+#define DWARF_FRAME_CFA_REG_EXP (1 << 1)
+
+ unsigned long return_addr;
+};
+
+/**
+ * dwarf_reg - DWARF register
+ * @flags: Describes how to calculate the value of this register
+ */
+struct dwarf_reg {
+ unsigned long addr;
+ unsigned long flags;
+#define DWARF_REG_OFFSET (1 << 0)
+};
+
+/**
+ * dwarf_stack - a DWARF stack contains a collection of DWARF frames
+ * @depth: the number of frames in the stack
+ * @level: an array of DWARF frames, indexed by stack level
+ *
+ */
+struct dwarf_stack {
+ unsigned int depth;
+ struct dwarf_frame **level;
+};
+
+/*
+ * Call Frame instruction opcodes.
+ */
+#define DW_CFA_advance_loc 0x40
+#define DW_CFA_offset 0x80
+#define DW_CFA_restore 0xc0
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_hi_user 0x3f
+
+/*
+ * Some call frame instructions encode their operands in the opcode. We
+ * need some helper functions to extract both the opcode and operands
+ * from an instruction.
+ */
+static inline unsigned int DW_CFA_opcode(unsigned long insn)
+{
+ return (insn & 0xc0);
+}
+
+static inline unsigned int DW_CFA_operand(unsigned long insn)
+{
+ return (insn & 0x3f);
+}
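/*
 * A minimal illustrative sketch (not part of this patch): how the two
 * helpers above split an instruction that embeds its operand in the
 * opcode byte. The byte value and the function name are invented.
 */
static inline void dwarf_cfa_split_example(void)
{
	unsigned long insn = 0x45;		/* hypothetical instruction byte */
	unsigned int op, delta;

	op = DW_CFA_opcode(insn);		/* 0x40, i.e. DW_CFA_advance_loc */
	delta = DW_CFA_operand(insn);		/* operand of 5 code-alignment units */

	(void)op;
	(void)delta;
}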
+
+#define DW_EH_FRAME_CIE 0 /* .eh_frame CIE IDs are 0 */
+#define DW_CIE_ID 0xffffffff
+#define DW64_CIE_ID 0xffffffffffffffffULL
+
+/*
+ * DWARF FDE/CIE length field values.
+ */
+#define DW_EXT_LO 0xfffffff0
+#define DW_EXT_HI 0xffffffff
+#define DW_EXT_DWARF64 DW_EXT_HI
+
+extern void dwarf_unwinder_init(void);
+
+extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
+ struct dwarf_frame *);
+#endif /* __ASSEMBLY__ */
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_REGISTER .cfi_register
+#define CFI_REL_OFFSET .cfi_rel_offset
+
+#else
+
+/*
+ * Use the assembler comment character ('!' on SH) to ignore the rest of the line.
+ */
+#define CFI_IGNORE !
+
+#define CFI_STARTPROC CFI_IGNORE
+#define CFI_ENDPROC CFI_IGNORE
+#define CFI_DEF_CFA CFI_IGNORE
+#define CFI_REGISTER CFI_IGNORE
+#define CFI_REL_OFFSET CFI_IGNORE
+
+#ifndef __ASSEMBLY__
+static inline void dwarf_unwinder_init(void)
+{
+}
+#endif
+
+#endif /* CONFIG_DWARF_UNWINDER */
+
+#endif /* __ASM_SH_DWARF_H */
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 01a4076a371..a78701da775 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -7,6 +7,7 @@ extern void __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
extern char _ebss[];
+extern char __start_eh_frame[], __stop_eh_frame[];
#endif /* __ASM_SH_SECTIONS_H */
diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
new file mode 100644
index 00000000000..244ec4ad9a7
--- /dev/null
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -0,0 +1,17 @@
+#ifndef __ASM_SH_VMLINUX_LDS_H
+#define __ASM_SH_VMLINUX_LDS_H
+
+#include <asm-generic/vmlinux.lds.h>
+
+#ifdef CONFIG_DWARF_UNWINDER
+#define DWARF_EH_FRAME \
+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_eh_frame) = .; \
+ *(.eh_frame) \
+ VMLINUX_SYMBOL(__stop_eh_frame) = .; \
+ }
+#else
+#define DWARF_EH_FRAME
+#endif
+
+#endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 37a3b7704fc..f2245ebf0b3 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -33,6 +33,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_DUMP_CODE) += disassemble.o
obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 00b73e7598c..639ee514266 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -13,6 +13,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 00000000000..09c6fd7fd05
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,876 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This is an implementation of a DWARF unwinder. Its main purpose is
+ * for generating stacktrace information. Based on the DWARF 3
+ * specification from http://www.dwarfstd.org.
+ *
+ * TODO:
+ * - DWARF64 doesn't work.
+ */
+
+/* #define DEBUG */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <asm/dwarf.h>
+#include <asm/unwinder.h>
+#include <asm/sections.h>
+#include <asm-generic/unaligned.h>
+#include <asm/stacktrace.h>
+
+static LIST_HEAD(dwarf_cie_list);
+DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static LIST_HEAD(dwarf_fde_list);
+DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+/*
+ * Figure out whether we need to allocate some dwarf registers. If dwarf
+ * registers have already been allocated then we may need to realloc
+ * them. "reg" is a register number that we need to be able to access
+ * after this call.
+ *
+ * Register numbers start at zero, therefore we need to allocate space
+ * for "reg" + 1 registers.
+ */
+static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
+ unsigned int reg)
+{
+ struct dwarf_reg *regs;
+ unsigned int num_regs = reg + 1;
+ size_t new_size;
+ size_t old_size;
+
+ new_size = num_regs * sizeof(*regs);
+ old_size = frame->num_regs * sizeof(*regs);
+
+ /* Fast path: don't allocate any regs if we've already got enough. */
+ if (frame->num_regs >= num_regs)
+ return;
+
+ regs = kzalloc(new_size, GFP_KERNEL);
+ if (!regs) {
+ printk(KERN_WARNING "Unable to allocate DWARF registers\n");
+ /*
+ * Let's just bomb hard here, we have no way to
+ * gracefully recover.
+ */
+ BUG();
+ }
+
+ if (frame->regs) {
+ memcpy(regs, frame->regs, old_size);
+ kfree(frame->regs);
+ }
+
+ frame->regs = regs;
+ frame->num_regs = num_regs;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine, and store them at @dst. Both @src and @dst
+ * may be arbitrarily aligned, which is why the unaligned access
+ * helpers are used rather than plain loads and stores. Return 'n',
+ * the number of bytes read.
+ */
+static inline int dwarf_read_addr(void *src, void *dst)
+{
+ u32 val = __get_unaligned_cpu32(src);
+ __put_unaligned_cpu32(val, dst);
+
+ return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
+ */
+static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
+{
+ unsigned int result;
+ unsigned char byte;
+ int shift, count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ count++;
+
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_leb128 - read signed LEB128 data
+ * @addr: the address of the LEB128 encoded data
+ * @ret: address to store the result
+ *
+ * Decode signed LEB128 data. The algorithm is taken from Appendix
+ * C of the DWARF 3 spec. Return the number of bytes read.
+ */
+static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
+{
+ unsigned char byte;
+ int result, shift;
+ int num_bits;
+ int count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+ count++;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ /* The number of bits in a signed integer. */
+ num_bits = 8 * sizeof(result);
+
+ if ((shift < num_bits) && (byte & 0x40))
+ result |= (-1 << shift);
+
+ *ret = result;
+
+ return count;
+}
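/*
 * A minimal sketch (not part of this patch) sanity-checking the LEB128
 * decoders above by hand. The unsigned byte sequence is the classic
 * 624485 example from the DWARF spec; the signed sequence and the
 * function name are invented for illustration.
 */
static inline void dwarf_leb128_example(void)
{
	/* 0xe5 0x8e 0x26 is the ULEB128 encoding of 624485. */
	unsigned int uval = (0xe5 & 0x7f) | ((0x8e & 0x7f) << 7) |
			    ((0x26 & 0x7f) << 14);

	/* 0xc0 0xbb 0x78 is the SLEB128 encoding of -123456. */
	int sval = (0xc0 & 0x7f) | ((0xbb & 0x7f) << 7) | ((0x78 & 0x7f) << 14);

	/* Bit 0x40 of the final byte is set, so sign-extend from bit 21. */
	sval |= -(1 << 21);

	BUG_ON(uval != 624485 || sval != -123456);
}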
+
+/**
+ * dwarf_read_encoded_value - return the decoded value at @addr
+ * @addr: the address of the encoded value
+ * @val: where to write the decoded value
+ * @encoding: the encoding with which we can decode @addr
+ *
+ * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
+ * the value at @addr using @encoding. The decoded value is written
+ * to @val and the number of bytes read is returned.
+ */
+static int dwarf_read_encoded_value(char *addr, unsigned long *val,
+ char encoding)
+{
+ unsigned long decoded_addr = 0;
+ int count = 0;
+
+ switch (encoding & 0x70) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ decoded_addr = (unsigned long)addr;
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", (encoding & 0x70));
+ BUG();
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & 0x0f) {
+ case DW_EH_PE_sdata4:
+ case DW_EH_PE_udata4:
+ count += 4;
+ decoded_addr += __get_unaligned_cpu32(addr);
+ __raw_writel(decoded_addr, val);
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", encoding);
+ BUG();
+ }
+
+ return count;
+}
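/*
 * A small sketch (not part of this patch) of what a DW_EH_PE_pcrel |
 * DW_EH_PE_sdata4 encoding means for the function above: the stored
 * 4-byte value is relative to the address of the field itself. The
 * numbers and the function name are invented for illustration.
 */
static inline unsigned long dwarf_pcrel_example(void)
{
	char *field = (char *)0x8c001000;	/* hypothetical FDE field address */
	long delta = -0x100;			/* hypothetical value stored in the field */

	/* dwarf_read_encoded_value() would decode this to 0x8c000f00. */
	return (unsigned long)field + delta;
}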
+
+/**
+ * dwarf_entry_len - return the length of an FDE or CIE
+ * @addr: the address of the entry
+ * @len: the length of the entry
+ *
+ * Read the initial_length field of the entry and store the size of
+ * the entry in @len. Return the number of bytes read, or a count
+ * of 0 on error.
+ */
+static inline int dwarf_entry_len(char *addr, unsigned long *len)
+{
+ u32 initial_len;
+ int count;
+
+ initial_len = __get_unaligned_cpu32(addr);
+ count = 4;
+
+ /*
+ * An initial length field value in the range DW_EXT_LO -
+ * DW_EXT_HI indicates an extension, and should not be
+ * interpreted as a length. The only extension that we currently
+ * understand is the use of DWARF64 addresses.
+ */
+ if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
+ /*
+ * The 64-bit length field immediately follows the
+ * compulsory 32-bit length field.
+ */
+ if (initial_len == DW_EXT_DWARF64) {
+ *len = __get_unaligned_cpu64(addr + 4);
+ count = 12;
+ } else {
+ printk(KERN_WARNING "Unknown DWARF extension\n");
+ count = 0;
+ }
+ } else
+ *len = initial_len;
+
+ return count;
+}
+
+/**
+ * dwarf_lookup_cie - locate the CIE
+ * @cie_ptr: the address of the CIE within .eh_frame, as recorded in the FDE
+ */
+static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
+{
+ struct dwarf_cie *cie, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ /*
+ * We've cached the last CIE we looked up because chances are
+ * that the FDE wants this CIE.
+ */
+ if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
+ cie = cached_cie;
+ goto out;
+ }
+
+ list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
+ if (cie->cie_pointer == cie_ptr) {
+ cached_cie = cie;
+ break;
+ }
+ }
+
+ /* Couldn't find the entry in the list. */
+ if (&cie->link == &dwarf_cie_list)
+ cie = NULL;
+out:
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+ return cie;
+}
+
+/**
+ * dwarf_lookup_fde - locate the FDE that covers pc
+ * @pc: the program counter
+ */
+struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+{
+ unsigned long flags;
+ struct dwarf_fde *fde, *n;
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
+ unsigned long start, end;
+
+ start = fde->initial_location;
+ end = fde->initial_location + fde->address_range;
+
+ if (pc >= start && pc < end)
+ break;
+ }
+
+ /* Couldn't find the entry in the list. */
+ if (&fde->link == &dwarf_fde_list)
+ fde = NULL;
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return fde;
+}
+
+/**
+ * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
+ * @insn_start: address of the first instruction
+ * @insn_end: address of the end of the instruction sequence (exclusive)
+ * @cie: the CIE for this function
+ * @fde: the FDE for this function
+ * @frame: the frame whose CFA the instructions compute
+ * @pc: the program counter of the address we're interested in
+ *
+ * Execute the Call Frame instruction sequence starting at
+ * @insn_start and ending at @insn_end. The instructions describe
+ * how to calculate the Canonical Frame Address of a stackframe.
+ * Store the results in @frame.
+ */
+static int dwarf_cfa_execute_insns(unsigned char *insn_start,
+ unsigned char *insn_end,
+ struct dwarf_cie *cie,
+ struct dwarf_fde *fde,
+ struct dwarf_frame *frame,
+ unsigned long pc)
+{
+ unsigned char insn;
+ unsigned char *current_insn;
+ unsigned int count, delta, reg, expr_len, offset;
+
+ current_insn = insn_start;
+
+ while (current_insn < insn_end && frame->pc <= pc) {
+ insn = __raw_readb(current_insn++);
+
+ /*
+ * Firstly, handle the opcodes that embed their operands
+ * in the instructions.
+ */
+ switch (DW_CFA_opcode(insn)) {
+ case DW_CFA_advance_loc:
+ delta = DW_CFA_operand(insn);
+ delta *= cie->code_alignment_factor;
+ frame->pc += delta;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_offset:
+ reg = DW_CFA_operand(insn);
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ dwarf_frame_alloc_regs(frame, reg);
+ frame->regs[reg].addr = offset;
+ frame->regs[reg].flags |= DWARF_REG_OFFSET;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_restore:
+ reg = DW_CFA_operand(insn);
+ continue;
+ /* NOTREACHED */
+ }
+
+ /*
+ * Secondly, handle the opcodes that don't embed their
+ * operands in the instruction.
+ */
+ switch (insn) {
+ case DW_CFA_nop:
+ continue;
+ case DW_CFA_advance_loc1:
+ delta = *current_insn++;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc2:
+ delta = __get_unaligned_cpu16(current_insn);
+ current_insn += 2;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc4:
+ delta = __get_unaligned_cpu32(current_insn);
+ current_insn += 4;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ break;
+ case DW_CFA_restore_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_undefined:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_def_cfa:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_offset);
+ current_insn += count;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_register:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_offset:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ frame->cfa_offset = offset;
+ break;
+ case DW_CFA_def_cfa_expression:
+ count = dwarf_read_uleb128(current_insn, &expr_len);
+ current_insn += count;
+
+ frame->cfa_expr = current_insn;
+ frame->cfa_expr_len = expr_len;
+ current_insn += expr_len;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+ break;
+ case DW_CFA_offset_extended_sf:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ dwarf_frame_alloc_regs(frame, reg);
+ frame->regs[reg].flags |= DWARF_REG_OFFSET;
+ frame->regs[reg].addr = offset;
+ break;
+ case DW_CFA_val_offset:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+ frame->regs[reg].flags |= DWARF_REG_OFFSET;
+ frame->regs[reg].addr = offset;
+ break;
+ default:
+ pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+ break;
+ }
+ }
+
+ return 0;
+}
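/*
 * A worked example (not from this patch) of what the interpreter above
 * does to a struct dwarf_frame, assuming a code alignment factor of 2
 * and a data alignment factor of -4; the instruction sequence itself is
 * invented:
 *
 *	DW_CFA_def_cfa 14, 8	-> frame->cfa_register = 14,
 *				   frame->cfa_offset = 8,
 *				   frame->flags |= DWARF_FRAME_CFA_REG_OFFSET
 *	DW_CFA_offset 17, 1	-> frame->regs[17].addr = 1 * -4 = -4,
 *				   frame->regs[17].flags |= DWARF_REG_OFFSET
 *	DW_CFA_advance_loc 3	-> frame->pc += 3 * 2 = 6
 */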
+
+/**
+ * dwarf_unwind_stack - recursively unwind the stack
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames is
+ * linked via the "prev" member.
+ */
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
+{
+ struct dwarf_frame *frame;
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+ unsigned long addr;
+ int i, offset;
+
+ /*
+ * If this is the first invocation of this recursive function we
+ * need to read the contents of a physical register to get the CFA
+ * in order to begin the virtual unwinding of the stack.
+ *
+ * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of
+ * this function because the return address register
+ * (DWARF_ARCH_RA_REG) will probably not be initialised until a
+ * few instructions into the prologue.
+ */
+ if (!pc && !prev) {
+ pc = (unsigned long)&dwarf_unwind_stack;
+ pc += DWARF_ARCH_UNWIND_OFFSET;
+ }
+
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame)
+ return NULL;
+
+ frame->prev = prev;
+
+ fde = dwarf_lookup_fde(pc);
+ if (!fde) {
+ /*
+ * This is our normal exit path - the one that stops the
+ * recursion. There are two reasons why we might exit
+ * here:
+ *
+ * a) pc has no associated DWARF frame info and so
+ * we don't know how to unwind this frame. This is
+ * usually the case when we're trying to unwind a
+ * frame that was called from some assembly code
+ * that has no DWARF info, e.g. syscalls.
+ *
+ * b) the DWARF debug info for pc is bogus. There's
+ * really no way to distinguish this case from the
+ * one above, which is unfortunate because otherwise
+ * we could print a warning here.
+ */
+ kfree(frame);
+ return NULL;
+ }
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+
+ frame->pc = fde->initial_location;
+
+ /* CIE initial instructions */
+ dwarf_cfa_execute_insns(cie->initial_instructions,
+ cie->instructions_end, cie, fde, frame, pc);
+
+ /* FDE instructions */
+ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
+ fde, frame, pc);
+
+ /* Calculate the CFA */
+ switch (frame->flags) {
+ case DWARF_FRAME_CFA_REG_OFFSET:
+ if (prev) {
+ BUG_ON(!prev->regs[frame->cfa_register].flags);
+
+ addr = prev->cfa;
+ addr += prev->regs[frame->cfa_register].addr;
+ frame->cfa = __raw_readl(addr);
+
+ } else {
+ /*
+ * Again, this is the first invocation of this
+ * recursive function. We need to physically
+ * read the contents of a register in order to
+ * get the Canonical Frame Address for this
+ * function.
+ */
+ frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
+ }
+
+ frame->cfa += frame->cfa_offset;
+ break;
+ default:
+ BUG();
+ }
+
+ /* If we haven't seen the return address reg, we're screwed. */
+ BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);
+
+ for (i = 0; i < frame->num_regs; i++) {
+ struct dwarf_reg *reg = &frame->regs[i];
+
+ if (!reg->flags)
+ continue;
+
+ offset = reg->addr;
+ offset += frame->cfa;
+ }
+
+ addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
+ frame->return_addr = __raw_readl(addr);
+
+ frame->next = dwarf_unwind_stack(frame->return_addr, frame);
+ return frame;
+}
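/*
 * A worked example (not from this patch) of the CFA arithmetic above,
 * using invented numbers. Suppose the innermost frame's rule is
 * "CFA = r14 + 8" and dwarf_read_arch_reg(14) returns 0x8c7f9ff0:
 *
 *	frame->cfa = 0x8c7f9ff0 + 8 = 0x8c7f9ff8
 *
 * If the frame's rules also say that the caller's r14 was saved at
 * CFA - 4 and the return address (DWARF_ARCH_RA_REG) at CFA - 8, then
 *
 *	return_addr is read from 0x8c7f9ff8 - 8 = 0x8c7f9ff0
 *
 * and the next level of the recursion reads the saved r14 from
 * 0x8c7f9ff8 - 4 = 0x8c7f9ff4 when computing the caller's CFA.
 */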
+
+static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
+ unsigned char *end)
+{
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+
+ cie = kzalloc(sizeof(*cie), GFP_KERNEL);
+ if (!cie)
+ return -ENOMEM;
+
+ cie->length = len;
+
+ /*
+ * Record the address of this CIE within the
+ * .eh_frame section. It allows this CIE to be
+ * quickly and easily looked up from the
+ * corresponding FDE.
+ */
+ cie->cie_pointer = (unsigned long)entry;
+
+ cie->version = *(char *)p++;
+ BUG_ON(cie->version != 1);
+
+ cie->augmentation = p;
+ p += strlen(cie->augmentation) + 1;
+
+ count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
+ p += count;
+
+ count = dwarf_read_leb128(p, &cie->data_alignment_factor);
+ p += count;
+
+ /*
+ * Which column in the rule table contains the
+ * return address?
+ */
+ if (cie->version == 1) {
+ cie->return_address_reg = __raw_readb(p);
+ p++;
+ } else {
+ count = dwarf_read_uleb128(p, &cie->return_address_reg);
+ p += count;
+ }
+
+ if (cie->augmentation[0] == 'z') {
+ unsigned int length, count;
+ cie->flags |= DWARF_CIE_Z_AUGMENTATION;
+
+ count = dwarf_read_uleb128(p, &length);
+ p += count;
+
+ BUG_ON((unsigned char *)p > end);
+
+ cie->initial_instructions = p + length;
+ cie->augmentation++;
+ }
+
+ while (*cie->augmentation) {
+ /*
+ * "L" indicates a byte showing how the
+ * LSDA pointer is encoded. Skip it.
+ */
+ if (*cie->augmentation == 'L') {
+ p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'R') {
+ /*
+ * "R" indicates a byte showing
+ * how FDE addresses are
+ * encoded.
+ */
+ cie->encoding = *(char *)p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'P') {
+ /*
+ * "R" indicates a personality
+ * routine in the CIE
+ * augmentation.
+ */
+ BUG();
+ } else if (*cie->augmentation == 'S') {
+ BUG();
+ } else {
+ /*
+ * Unknown augmentation. Assume
+ * 'z' augmentation.
+ */
+ p = cie->initial_instructions;
+ BUG_ON(!p);
+ break;
+ }
+ }
+
+ cie->initial_instructions = p;
+ cie->instructions_end = end;
+
+ /* Add to list */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+ list_add_tail(&cie->link, &dwarf_cie_list);
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ return 0;
+}
+
+static int dwarf_parse_fde(void *entry, u32 entry_type,
+ void *start, unsigned long len)
+{
+ struct dwarf_fde *fde;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+ void *p = start;
+
+ fde = kzalloc(sizeof(*fde), GFP_KERNEL);
+ if (!fde)
+ return -ENOMEM;
+
+ fde->length = len;
+
+ /*
+ * In a .eh_frame section the CIE pointer is the delta between the
+ * address of the CIE pointer field in this FDE and the CIE itself.
+ */
+ fde->cie_pointer = (unsigned long)(p - entry_type - 4);
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+ fde->cie = cie;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->initial_location,
+ cie->encoding);
+ else
+ count = dwarf_read_addr(p, &fde->initial_location);
+
+ p += count;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->address_range,
+ cie->encoding & 0x0f);
+ else
+ count = dwarf_read_addr(p, &fde->address_range);
+
+ p += count;
+
+ if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
+ unsigned int length;
+ count = dwarf_read_uleb128(p, &length);
+ p += count + length;
+ }
+
+ /* Call frame instructions. */
+ fde->instructions = p;
+ fde->end = start + len;
+
+ /* Add to list. */
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_add_tail(&fde->link, &dwarf_fde_list);
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return 0;
+}
+
+static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops, void *data)
+{
+ struct dwarf_frame *frame;
+
+ frame = dwarf_unwind_stack(0, NULL);
+
+ while (frame && frame->return_addr) {
+ ops->address(data, frame->return_addr, 1);
+ frame = frame->next;
+ }
+}
+
+static struct unwinder dwarf_unwinder = {
+ .name = "dwarf-unwinder",
+ .dump = dwarf_unwinder_dump,
+ .rating = 150,
+};
+
+static void dwarf_unwinder_cleanup(void)
+{
+ struct dwarf_cie *cie, *m;
+ struct dwarf_fde *fde, *n;
+ unsigned long flags;
+
+ /*
+ * Deallocate all the memory allocated for the DWARF unwinder.
+ * Traverse all the FDE/CIE lists and remove and free all the
+ * memory associated with those data structures.
+ */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+ list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
+ kfree(cie);
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+ list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
+ kfree(fde);
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .eh_frame section to
+ * make it easier to look up CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to look up the FDE for a given PC, so we build lists of FDE
+ * and CIE entries to speed up those lookups.
+ */
+void dwarf_unwinder_init(void)
+{
+ u32 entry_type;
+ void *p, *entry;
+ int count, err;
+ unsigned long len;
+ unsigned int c_entries, f_entries;
+ unsigned char *end;
+ INIT_LIST_HEAD(&dwarf_cie_list);
+ INIT_LIST_HEAD(&dwarf_fde_list);
+
+ c_entries = 0;
+ f_entries = 0;
+ entry = &__start_eh_frame;
+
+ while ((char *)entry < __stop_eh_frame) {
+ p = entry;
+
+ count = dwarf_entry_len(p, &len);
+ if (count == 0) {
+ /*
+ * We read a bogus length field value. There is
+ * nothing we can do here apart from disabling
+ * the DWARF unwinder. We can't even skip this
+ * entry and move to the next one because 'len'
+ * tells us where our next entry is.
+ */
+ goto out;
+ } else
+ p += count;
+
+ /* initial length does not include itself */
+ end = p + len;
+
+ entry_type = __get_unaligned_cpu32(p);
+ p += 4;
+
+ if (entry_type == DW_EH_FRAME_CIE) {
+ err = dwarf_parse_cie(entry, p, len, end);
+ if (err < 0)
+ goto out;
+ else
+ c_entries++;
+ } else {
+ err = dwarf_parse_fde(entry, entry_type, p, len);
+ if (err < 0)
+ goto out;
+ else
+ f_entries++;
+ }
+
+ entry = (char *)entry + len + 4;
+ }
+
+ printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
+ c_entries, f_entries);
+
+ err = unwinder_register(&dwarf_unwinder);
+ if (err)
+ goto out;
+
+ return;
+
+out:
+ printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
+ dwarf_unwinder_cleanup();
+}
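/*
 * A purely illustrative sketch (not part of this patch) of the record
 * stream that dwarf_unwinder_init() walks. The struct name is invented
 * and real records are variable length, so this only documents the
 * field order; it is not meant to be overlaid on the section.
 */
struct eh_frame_record_sketch {
	u32 initial_length;	/* does not count itself; 0xffffffff is the DWARF64 escape */
	u32 cie_id_or_pointer;	/* 0 (DW_EH_FRAME_CIE) for a CIE, otherwise an FDE */
	unsigned char body[];	/* CIE or FDE payload; the next record follows at +4+length */
};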
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 278c68c6048..2bb43dc74f2 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
+#include <asm/dwarf.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
@@ -261,6 +262,9 @@ void __init init_IRQ(void)
sh_mv.mv_init_irq();
irq_ctx_init(smp_processor_id());
+
+ /* This needs to be early, but not too early.. */
+ dwarf_unwinder_init();
}
#ifdef CONFIG_SPARSE_IRQ
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 80dc9f8d941..1b7d9d541e0 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -12,7 +12,7 @@ OUTPUT_ARCH(sh)
#include <asm/thread_info.h>
#include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
ENTRY(_start)
SECTIONS
@@ -70,6 +70,8 @@ SECTIONS
_edata = .; /* End of data section */
+ DWARF_EH_FRAME
+
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)