From 2c59b0b70b9d5d61c726f179724660c4c2423f31 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 14:41:35 +0000 Subject: usb: m66592-udc platform data on_chip support Convert the m66592-udc driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_BUILT_IN_M66592 ugliness. This makes the code cleaner and also allows us to support both external and internal m66592 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip m66592 without modifying the Kconfig. The patch adds a m66592 header file for platform data and ties in platform data to the existing m66592 devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ea524a2da3e..0bad14a4423 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -47,9 +48,13 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; +static struct m66592_platdata usbf_platdata = { + .on_chip = 1, +}; + static struct resource usbf_resources[] = { [0] = { - .name = "m66592_udc", + .name = "USBF", .start = 0x04480000, .end = 0x044800FF, .flags = IORESOURCE_MEM, @@ -67,6 +72,7 @@ static struct platform_device usbf_device = { .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, -- cgit v1.2.3 From 955c9863bb5855a994751843e7066017edc00410 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 15:14:29 +0000 Subject: sh: convert processor device setup functions to arch_initcall() Convert the processor platform device setup functions from __initcall() and sometimes device_initcall() to arch_initcall(). This makes sure that the platform devices are registered a bit earlier so the devices are available when drivers register using initcall levels earlier than device_initcall(). A good example is platform devices needed by i2c-sh_mobile.c which registers a bit earlier using subsys_initcall(). 
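A rough illustration of the initcall ordering relied on here (the "foo" device and driver names are hypothetical, not part of this series): a platform device registered from arch_initcall(), level 3, already exists by the time a driver registering itself from subsys_initcall(), level 4, is probed, whereas a plain __initcall()/device_initcall() registration at level 6 would come too late for such a driver.

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device foo_device = {
        .name   = "foo",
        .id     = -1,
};

static struct platform_device *foo_devices[] __initdata = {
        &foo_device,
};

static int __init foo_devices_setup(void)
{
        /* level 3: runs before any subsys_initcall() driver probes */
        return platform_add_devices(foo_devices, ARRAY_SIZE(foo_devices));
}
arch_initcall(foo_devices_setup);

static int foo_probe(struct platform_device *pdev)
{
        /* reached because the device above was registered one level earlier */
        return 0;
}

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .driver = {
                .name   = "foo",
        },
};

static int __init foo_driver_init(void)
{
        /* level 4: the same level i2c-sh_mobile registers at */
        return platform_driver_register(&foo_driver);
}
subsys_initcall(foo_driver_init);
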
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-mxg.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7705.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7710.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7720.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh7750.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh7760.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-shx3.c | 2 +- arch/sh/kernel/cpu/sh5/setup-sh5.c | 2 +- 24 files changed, 24 insertions(+), 24 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 13798733f2d..8555c05e866 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -187,7 +187,7 @@ static int __init sh7619_devices_setup(void) return platform_add_devices(sh7619_devices, ARRAY_SIZE(sh7619_devices)); } -__initcall(sh7619_devices_setup); +arch_initcall(sh7619_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index 869c2da4820..b6737644531 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c @@ -238,7 +238,7 @@ static int __init mxg_devices_setup(void) return platform_add_devices(mxg_devices, ARRAY_SIZE(mxg_devices)); } -__initcall(mxg_devices_setup); +arch_initcall(mxg_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index d8febe12806..fbde5b75deb 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -357,7 +357,7 @@ static int __init sh7201_devices_setup(void) return platform_add_devices(sh7201_devices, ARRAY_SIZE(sh7201_devices)); } -__initcall(sh7201_devices_setup); +arch_initcall(sh7201_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 62e3039d239..d3fd536c9a8 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -367,7 +367,7 @@ static int __init sh7203_devices_setup(void) return platform_add_devices(sh7203_devices, ARRAY_SIZE(sh7203_devices)); } -__initcall(sh7203_devices_setup); +arch_initcall(sh7203_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index 3e6f3d7a58b..a9ccc5e8d9e 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -338,7 +338,7 @@ static int __init sh7206_devices_setup(void) return platform_add_devices(sh7206_devices, ARRAY_SIZE(sh7206_devices)); } -__initcall(sh7206_devices_setup); +arch_initcall(sh7206_devices_setup); void __init plat_irq_setup(void) { diff --git 
a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index 88f742fed9e..c2310598387 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -222,7 +222,7 @@ static int __init sh7705_devices_setup(void) return platform_add_devices(sh7705_devices, ARRAY_SIZE(sh7705_devices)); } -__initcall(sh7705_devices_setup); +arch_initcall(sh7705_devices_setup); static struct platform_device *sh7705_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index c5630679858..347ab35d069 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -250,7 +250,7 @@ static int __init sh770x_devices_setup(void) return platform_add_devices(sh770x_devices, ARRAY_SIZE(sh770x_devices)); } -__initcall(sh770x_devices_setup); +arch_initcall(sh770x_devices_setup); static struct platform_device *sh770x_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index efa76c8148f..717e90ae109 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -226,7 +226,7 @@ static int __init sh7710_devices_setup(void) return platform_add_devices(sh7710_devices, ARRAY_SIZE(sh7710_devices)); } -__initcall(sh7710_devices_setup); +arch_initcall(sh7710_devices_setup); static struct platform_device *sh7710_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 5b2107798ed..74d8baaf8e9 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -388,7 +388,7 @@ static int __init sh7720_devices_setup(void) return platform_add_devices(sh7720_devices, ARRAY_SIZE(sh7720_devices)); } -__initcall(sh7720_devices_setup); +arch_initcall(sh7720_devices_setup); static struct platform_device *sh7720_early_devices[] __initdata = { &cmt0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 6d088d12359..de4827df19a 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -138,7 +138,7 @@ static int __init sh4202_devices_setup(void) return platform_add_devices(sh4202_devices, ARRAY_SIZE(sh4202_devices)); } -__initcall(sh4202_devices_setup); +arch_initcall(sh4202_devices_setup); static struct platform_device *sh4202_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 851672d15cf..1b8b122e8f3 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -239,7 +239,7 @@ static int __init sh7750_devices_setup(void) return platform_add_devices(sh7750_devices, ARRAY_SIZE(sh7750_devices)); } -__initcall(sh7750_devices_setup); +arch_initcall(sh7750_devices_setup); static struct platform_device *sh7750_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 5b822519bd9..7fbb7be9284 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -265,7 +265,7 @@ static int __init sh7760_devices_setup(void) return platform_add_devices(sh7760_devices, ARRAY_SIZE(sh7760_devices)); } -__initcall(sh7760_devices_setup); +arch_initcall(sh7760_devices_setup); static struct platform_device *sh7760_early_devices[] 
__initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 6307e087c86..ac4d5672ec1 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -325,7 +325,7 @@ static int __init sh7343_devices_setup(void) return platform_add_devices(sh7343_devices, ARRAY_SIZE(sh7343_devices)); } -__initcall(sh7343_devices_setup); +arch_initcall(sh7343_devices_setup); static struct platform_device *sh7343_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index f6d20881356..4a9010bf4fd 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -318,7 +318,7 @@ static int __init sh7366_devices_setup(void) return platform_add_devices(sh7366_devices, ARRAY_SIZE(sh7366_devices)); } -__initcall(sh7366_devices_setup); +arch_initcall(sh7366_devices_setup); static struct platform_device *sh7366_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 0bad14a4423..67b0d87fcb2 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -365,7 +365,7 @@ static int __init sh7722_devices_setup(void) return platform_add_devices(sh7722_devices, ARRAY_SIZE(sh7722_devices)); } -__initcall(sh7722_devices_setup); +arch_initcall(sh7722_devices_setup); static struct platform_device *sh7722_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 28516499a2c..26dc4d32325 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -473,7 +473,7 @@ static int __init sh7723_devices_setup(void) return platform_add_devices(sh7723_devices, ARRAY_SIZE(sh7723_devices)); } -__initcall(sh7723_devices_setup); +arch_initcall(sh7723_devices_setup); static struct platform_device *sh7723_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index e5ac9eb11c6..a04edaab9a2 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -508,7 +508,7 @@ static int __init sh7724_devices_setup(void) return platform_add_devices(sh7724_devices, ARRAY_SIZE(sh7724_devices)); } -device_initcall(sh7724_devices_setup); +arch_initcall(sh7724_devices_setup); static struct platform_device *sh7724_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index f1e0c0d36da..4659fff6b84 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -314,7 +314,7 @@ static int __init sh7763_devices_setup(void) return platform_add_devices(sh7763_devices, ARRAY_SIZE(sh7763_devices)); } -__initcall(sh7763_devices_setup); +arch_initcall(sh7763_devices_setup); static struct platform_device *sh7763_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 1e86209db28..eead08d89d3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c @@ -368,7 +368,7 @@ static int __init sh7770_devices_setup(void) return platform_add_devices(sh7770_devices, ARRAY_SIZE(sh7770_devices)); } -__initcall(sh7770_devices_setup); 
+arch_initcall(sh7770_devices_setup); static struct platform_device *sh7770_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 715e05b431e..2c901f44695 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -256,7 +256,7 @@ static int __init sh7780_devices_setup(void) return platform_add_devices(sh7780_devices, ARRAY_SIZE(sh7780_devices)); } -__initcall(sh7780_devices_setup); +arch_initcall(sh7780_devices_setup); static struct platform_device *sh7780_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index af561402570..7f6c718b6c3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -263,7 +263,7 @@ static int __init sh7785_devices_setup(void) return platform_add_devices(sh7785_devices, ARRAY_SIZE(sh7785_devices)); } -__initcall(sh7785_devices_setup); +arch_initcall(sh7785_devices_setup); static struct platform_device *sh7785_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index b70049470a0..0104a8ec536 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -547,7 +547,7 @@ static int __init sh7786_devices_setup(void) return platform_add_devices(sh7786_devices, ARRAY_SIZE(sh7786_devices)); } -device_initcall(sh7786_devices_setup); +arch_initcall(sh7786_devices_setup); void __init plat_early_device_setup(void) { diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 53c65fd9cce..07f078961c7 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -256,7 +256,7 @@ static int __init shx3_devices_setup(void) return platform_add_devices(shx3_devices, ARRAY_SIZE(shx3_devices)); } -__initcall(shx3_devices_setup); +arch_initcall(shx3_devices_setup); void __init plat_early_device_setup(void) { diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c index f5ff1ac57fc..6a0f82f7003 100644 --- a/arch/sh/kernel/cpu/sh5/setup-sh5.c +++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c @@ -186,7 +186,7 @@ static int __init sh5_devices_setup(void) return platform_add_devices(sh5_devices, ARRAY_SIZE(sh5_devices)); } -__initcall(sh5_devices_setup); +arch_initcall(sh5_devices_setup); void __init plat_early_device_setup(void) { -- cgit v1.2.3 From 82b242214b6f5b96eb9b76452ac6e2b67dd81abd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 29 Jul 2009 22:43:58 +0900 Subject: Revert "sh: Bump the earlytimer bits back to time_init()." This reverts commit 1d29ebebcb951ab6b04d22807cafb24b893310a2. Bumping up the earlytimer initialization causes IRQs to be enabled too early, which blows up lockdep: ... NR_IRQS:256 nr_irqs:256 ------------[ cut here ]------------ Badness at kernel/lockdep.c:2128 Pid : 0, Comm: swapper CPU : 0 Not tainted (2.6.31-rc3-00205-g3ed6e12-dirty #2443) PC is at trace_hardirqs_on_caller+0x48/0x10c PR is at trace_hardirqs_on_caller+0x3c/0x10c ... Revert it back to late_time_init time, which fixes up lockdep. 
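The mechanism behind the fix is the generic late_time_init hook: time_init() runs early in start_kernel() while local interrupts are still disabled, whereas the late_time_init callback, if set, is invoked only once interrupt handling is fully up. A condensed sketch of the deferral pattern the revert restores (this simply mirrors the hunk below, it adds nothing new):

/* Defer the earlytimer probing, which may end up enabling interrupts,
 * from time_init() (IRQs still off, lockdep unhappy) to late_time_init
 * (invoked by start_kernel() after IRQs have been properly enabled). */
static void __init sh_late_time_init(void)
{
        early_platform_driver_register_all("earlytimer");
        early_platform_driver_probe("earlytimer", 2, 0);
}

void __init time_init(void)
{
        /* ... clocksource/clockevent setup stays here, IRQs off ... */
        late_time_init = sh_late_time_init;
}
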
Signed-off-by: Paul Mundt --- arch/sh/kernel/time.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index d2424b068b7..7f95f479060 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -92,6 +92,21 @@ module_init(rtc_generic_init); void (*board_time_init)(void); +static void __init sh_late_time_init(void) +{ + /* + * Make sure all compiled-in early timers register themselves. + * + * Run probe() for two "earlytimer" devices, these will be the + * clockevents and clocksource devices respectively. In the event + * that only a clockevents device is available, we -ENODEV on the + * clocksource and the jiffies clocksource is used transparently + * instead. No error handling is necessary here. + */ + early_platform_driver_register_all("earlytimer"); + early_platform_driver_probe("earlytimer", 2, 0); +} + void __init time_init(void) { if (board_time_init) @@ -108,15 +123,5 @@ void __init time_init(void) local_timer_setup(smp_processor_id()); #endif - /* - * Make sure all compiled-in early timers register themselves. - * - * Run probe() for two "earlytimer" devices, these will be the - * clockevents and clocksource devices respectively. In the event - * that only a clockevents device is available, we -ENODEV on the - * clocksource and the jiffies clocksource is used transparently - * instead. No error handling is necessary here. - */ - early_platform_driver_register_all("earlytimer"); - early_platform_driver_probe("earlytimer", 2, 0); + late_time_init = sh_late_time_init; } -- cgit v1.2.3 From fd78a76aefb5bf28a11d6960d29e03a11db62320 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Wed, 29 Jul 2009 23:01:24 +0900 Subject: sh: Rework irqflags tracing to fix up CONFIG_PROVE_LOCKING. This cleans up the irqflags tracing code quite a bit and ties it in to various missing callsites that caused an imbalance when CONFIG_PROVE_LOCKING was enabled. 
Previously this was catching on: 987 #ifdef CONFIG_PROVE_LOCKING 988 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 989 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 990 #endif 991 retval = -EAGAIN; with hardirqs being doubly enabled, and subsequently bailing out with the following call trace: Call trace: [<88035224>] __lock_acquire+0x616/0x6a6 [<88015a8c>] do_fork+0xf8/0x2b0 [<880331ec>] trace_hardirqs_on_caller+0xd4/0x114 [<88241074>] _spin_unlock_irq+0x20/0x64 [<88035224>] __lock_acquire+0x616/0x6a6 [<8800386c>] kernel_thread+0x48/0x70 [<88024ecc>] ____call_usermodehelper+0x0/0x110 [<88024ecc>] ____call_usermodehelper+0x0/0x110 [<88003894>] kernel_thread_helper+0x0/0x14 [<88024bac>] __call_usermodehelper+0x38/0x70 [<88025dc0>] worker_thread+0x150/0x274 [<88035b9c>] lock_release+0x0/0x198 [<88024b74>] __call_usermodehelper+0x0/0x70 [<88028cf0>] autoremove_wake_function+0x0/0x30 [<88028bf2>] kthread+0x3e/0x70 [<88025c70>] worker_thread+0x0/0x274 [<8800389c>] kernel_thread_helper+0x8/0x14 [<88028bb4>] kthread+0x0/0x70 [<88003894>] kernel_thread_helper+0x0/0x14 Reported-by: Nobuhiro Iwamatsu Signed-off-by: Stuart Menefy Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/entry-common.S | 63 +++++++++++-------------------------------- arch/sh/kernel/io_trapped.c | 7 ++--- 2 files changed, 20 insertions(+), 50 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d62175650c5..fc26ccd8278 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -45,7 +45,7 @@ */ #if defined(CONFIG_PREEMPT) -# define preempt_stop() cli +# define preempt_stop() cli ; TRACE_IRQS_OFF #else # define preempt_stop() # define resume_kernel __restore_all @@ -55,11 +55,7 @@ .align 2 ENTRY(exception_error) ! -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 2f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_ON sti mov.l 1f, r0 jmp @r0 @@ -67,22 +63,23 @@ ENTRY(exception_error) .align 2 1: .long do_exception_error -#ifdef CONFIG_TRACE_IRQFLAGS -2: .long trace_hardirqs_on -#endif .align 2 ret_from_exception: preempt_stop() -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 4f, r0 - jsr @r0 - nop -#endif ENTRY(ret_from_irq) ! mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register + + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bt 9f + TRACE_IRQS_ON +9: + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register shll r0 shll r0 ! kernel space? get_current_thread_info r8, r0 @@ -125,11 +122,7 @@ noresched: ENTRY(resume_userspace) ! r8: current_thread_info cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OfF mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all @@ -156,11 +149,7 @@ work_resched: jsr @r1 ! schedule nop cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF ! mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 @@ -172,10 +161,6 @@ work_resched: 1: .long schedule 2: .long do_notify_resume 3: .long resume_userspace -#ifdef CONFIG_TRACE_IRQFLAGS -4: .long trace_hardirqs_on -5: .long trace_hardirqs_off -#endif .align 2 syscall_exit_work: @@ -184,11 +169,7 @@ syscall_exit_work: tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 bt/s work_pending tst #_TIF_NEED_RESCHED, r0 -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_ON sti mov r15, r4 mov.l 8f, r0 ! do_syscall_trace_leave @@ -321,11 +302,7 @@ ENTRY(system_call) bt/s debug_trap ! it's a debug trap.. 
nop -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r10 - jsr @r10 - nop -#endif + TRACE_IRQS_ON sti ! @@ -355,11 +332,7 @@ syscall_call: ! syscall_exit: cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 6f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF ! get_current_thread_info r8, r0 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags @@ -377,9 +350,5 @@ syscall_exit: #endif 2: .long NR_syscalls 3: .long sys_call_table -#ifdef CONFIG_TRACE_IRQFLAGS -5: .long trace_hardirqs_on -6: .long trace_hardirqs_off -#endif 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 77dfecb6437..e27a19e1f46 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c @@ -112,14 +112,15 @@ void __iomem *match_trapped_io_handler(struct list_head *list, struct trapped_io *tiop; struct resource *res; int k, len; + unsigned long flags; - spin_lock_irq(&trapped_lock); + spin_lock_irqsave(&trapped_lock, flags); list_for_each_entry(tiop, list, list) { voffs = 0; for (k = 0; k < tiop->num_resources; k++) { res = tiop->resource + k; if (res->start == offset) { - spin_unlock_irq(&trapped_lock); + spin_unlock_irqrestore(&trapped_lock, flags); return tiop->virt_base + voffs; } @@ -127,7 +128,7 @@ void __iomem *match_trapped_io_handler(struct list_head *list, voffs += roundup(len, PAGE_SIZE); } } - spin_unlock_irq(&trapped_lock); + spin_unlock_irqrestore(&trapped_lock, flags); return NULL; } EXPORT_SYMBOL_GPL(match_trapped_io_handler); -- cgit v1.2.3 From 05aa7882757f68af799140142ec44f83b2df4298 Mon Sep 17 00:00:00 2001 From: Rafael Ignacio Zurita Date: Tue, 4 Aug 2009 14:38:08 +0900 Subject: sh: Add early printk support for SH770x CPUs. This adds early printk support for SH770x (tested on SH7709 based hp6xx). Signed-off-by: Rafael Ignacio Zurita Signed-off-by: Paul Mundt --- arch/sh/kernel/early_printk.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index a952dcf9999..64f2746baf9 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -134,7 +134,7 @@ static void scif_sercon_init(char *s) sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */ sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */ } -#elif defined(CONFIG_CPU_SH4) +#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) #define DEFAULT_BAUD 115200 /* * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 @@ -220,10 +220,8 @@ static int __init setup_early_printk(char *buf) early_console = &scif_console; #if !defined(CONFIG_SH_STANDARD_BIOS) -#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) scif_sercon_init(buf + 6); -#endif #endif } #endif -- cgit v1.2.3 From 6ba4a8f0f542e791e4158c91a844234b142578dc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 06:57:36 +0000 Subject: sh: hwblk support for sh7724 This patch adds hwblk support for the sh7724 processor. 
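A simplified, self-contained sketch of the idea behind the hwblk tables added below (the ex_ names and mode values are illustrative only, not the real hwblk API): every hardware block owns one module-stop bit and belongs to a power area, and the per-area usage counters decide how deep a sleep mode arch_hwblk_sleep_mode() may pick.

/* Illustrative only: the real code lives in hwblk-sh7724.c and the
 * generic hwblk support; names prefixed ex_ are made up for the sketch. */
enum ex_sleep_mode {
        EX_SLEEP,       /* corresponds to SUSP_SH_SLEEP */
        EX_SLEEP_SF,    /* SUSP_SH_SLEEP | SUSP_SH_SF   */
        EX_STANDBY_SF,  /* SUSP_SH_STANDBY | SUSP_SH_SF */
};

struct ex_area {
        unsigned int usage;             /* blocks currently in use */
};

struct ex_hwblk {
        unsigned long mstpcr;           /* e.g. MSTPCR2 at 0xa4150038 */
        int bit;                        /* module stop bit, e.g. 18 for SDHI0 */
        struct ex_area *area;           /* CORE_AREA, CORE_AREA_BM or SUB_AREA */
};

/* Same decision as arch_hwblk_sleep_mode() in the patch: an idle core
 * area permits standby with self-refresh, an idle bus-master area still
 * permits sleep with self-refresh, otherwise only plain sleep is safe. */
static enum ex_sleep_mode ex_sleep_mode(struct ex_area *core,
                                        struct ex_area *core_bm)
{
        if (!core->usage)
                return EX_STANDBY_SF;
        if (!core_bm->usage)
                return EX_SLEEP_SF;
        return EX_SLEEP;
}
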
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 119 ++++++++++++++++---------------- arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c | 121 +++++++++++++++++++++++++++++++++ 3 files changed, 184 insertions(+), 58 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 1d7ae38bc61..12cddf4c721 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -27,7 +27,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o -clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o +clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o hwblk-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 5d5c9b95288..ba24e38c9fc 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7724 registers */ #define FRQCRA 0xa4150000 @@ -156,64 +158,67 @@ struct clk div6_clks[] = { SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) +#define R_CLK (&r_clk) +#define P_CLK (&div4_clks[DIV4_P]) +#define B_CLK (&div4_clks[DIV4_B]) +#define I_CLK (&div4_clks[DIV4_I]) +#define SH_CLK (&div4_clks[DIV4_SH]) static struct clk mstp_clks[] = { - MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), - MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), - MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), - MSTP("rs0", &div4_clks[DIV4_B], MSTPCR0, 28, 1, 1, 0), - MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), - MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 26, 1, 1, 0), - MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), - MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 1, 1, 0), - MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), - MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), - MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0), - MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), - MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), - MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), - MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), - MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), - MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), - MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), - MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), - - MSTP("keysc0", &r_clk, MSTPCR1, 12, 0, 0, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 11, 0, 0, 0), - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), - MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0, 1, 0), - - MSTP("mmc0", &div4_clks[DIV4_B], 
MSTPCR2, 29, 0, 1, 0), - MSTP("eth0", &div4_clks[DIV4_B], MSTPCR2, 28, 0, 1, 0), - MSTP("atapi0", &div4_clks[DIV4_B], MSTPCR2, 26, 0, 1, 0), - MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), - MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), - MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), - MSTP("usb1", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), - MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 20, 0, 1, 1), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 19, 0, 1, 1), - MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), - MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), - MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 15, 1, 1, 1), - MSTP("ceu1", &div4_clks[DIV4_B], MSTPCR2, 13, 0, 1, 1), - MSTP("beu1", &div4_clks[DIV4_B], MSTPCR2, 12, 0, 1, 1), - MSTP("2ddmac0", &div4_clks[DIV4_SH], MSTPCR2, 10, 0, 1, 1), - MSTP("spu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0, 1, 0), - MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), + SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0), + SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0), + SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0), + SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0), + SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0), + SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0), + SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0), + SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0), + + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0), + SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0), + + SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0), + SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0), + SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0), + SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0), + SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0), + SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0), + SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0), + SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0), + SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, 
CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0), + SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0), + SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0), + SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -236,7 +241,7 @@ int __init arch_clk_init(void) ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c new file mode 100644 index 00000000000..1613ad6013c --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c @@ -0,0 +1,121 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c + * + * SH7724 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7724 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7724 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7724_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7724_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_RSMEM] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA), + [HWBLK_L2C] = HWBLK(MSTPCR0, 26, CORE_AREA), + [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM), + [HWBLK_TMU1] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, 
CORE_AREA), + [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA), + [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + + [HWBLK_KEYSC] = HWBLK(MSTPCR1, 12, SUB_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 11, SUB_AREA), + [HWBLK_IIC0] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_IIC1] = HWBLK(MSTPCR1, 8, CORE_AREA), + + [HWBLK_MMC] = HWBLK(MSTPCR2, 29, CORE_AREA), + [HWBLK_ETHER] = HWBLK(MSTPCR2, 28, CORE_AREA_BM), + [HWBLK_ATAPI] = HWBLK(MSTPCR2, 26, CORE_AREA_BM), + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA), + [HWBLK_USB1] = HWBLK(MSTPCR2, 21, CORE_AREA), + [HWBLK_USB0] = HWBLK(MSTPCR2, 20, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 19, CORE_AREA_BM), + [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA), + [HWBLK_VEU1] = HWBLK(MSTPCR2, 15, CORE_AREA_BM), + [HWBLK_CEU1] = HWBLK(MSTPCR2, 13, CORE_AREA_BM), + [HWBLK_BEU1] = HWBLK(MSTPCR2, 12, CORE_AREA_BM), + [HWBLK_2DDMAC] = HWBLK(MSTPCR2, 10, CORE_AREA_BM), + [HWBLK_SPU] = HWBLK(MSTPCR2, 9, CORE_AREA_BM), + [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU0] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU0] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7724_hwblk_info = { + .areas = sh7724_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7724_hwblk_area), + .hwblks = sh7724_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7724_hwblk), +}; + +int arch_hwblk_sleep_mode(void) +{ + if (!sh7724_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7724_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7724_hwblk_info); +} -- cgit v1.2.3 From 133b170f08d6c20578f25b1ae71f80a5e638ccb6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:01:36 +0000 Subject: sh: clean up MSTPCRn register definitions This patch removes the unused MSTPCRn register definitions from the SuperH Mobile code for sh7722, sh7723 and sh7724. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 3 --- arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 3 --- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 3 --- 3 files changed, 9 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index 1fa9e1dd1cc..5b1bbbe63b1 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -32,9 +32,6 @@ #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index bf64c78eee3..e5c63911403 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ -32,9 +32,6 @@ #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index ba24e38c9fc..34611d97378 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -33,9 +33,6 @@ #define FCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define SPUCLKCR 0xa415003c #define FLLFRQ 0xa4150050 #define LSTATS 0xa4150060 -- cgit v1.2.3 From 11d82905e0159c07fe2d1bfe5e7d80e4cea333ce Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 15:54:33 +0900 Subject: sh: Fix up early printk build error. Missing endif in the early printk case, fix it up.. Signed-off-by: Paul Mundt --- arch/sh/kernel/early_printk.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index 64f2746baf9..81a46145ffa 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -222,6 +222,7 @@ static int __init setup_early_printk(char *buf) #if !defined(CONFIG_SH_STANDARD_BIOS) #if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) scif_sercon_init(buf + 6); +#endif #endif } #endif -- cgit v1.2.3 From 4e14dfc722b8e9e07a355f97aa60a3d9f0739071 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 7 Aug 2009 16:11:19 +0100 Subject: sh: Use the generalized stacktrace ops Copy the stacktrace ops code from x86 and provide a central function for use by functions that need to dump a callstack. 
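The point of the change is that any interested caller can now walk the stack by handing dump_trace() a struct stacktrace_ops carrying its own callbacks instead of open-coding the loop. A hypothetical consumer (the ex_ names are not part of the patch), following the same shape as the save_stack_ops and print_trace_ops instances introduced below:

/* struct stacktrace_ops and dump_trace() are presumably declared in an
 * accompanying asm/stacktrace.h header, not visible in this
 * arch/sh/kernel-limited diff. */
struct ex_buf {
        unsigned long entries[8];
        unsigned int nr;
};

static void ex_warning(void *data, char *msg) { }
static void ex_warning_symbol(void *data, char *msg, unsigned long sym) { }
static int ex_stack(void *data, char *name) { return 0; }

static void ex_address(void *data, unsigned long addr, int reliable)
{
        struct ex_buf *buf = data;

        if (buf->nr < ARRAY_SIZE(buf->entries))
                buf->entries[buf->nr++] = addr;
}

static const struct stacktrace_ops ex_ops = {
        .warning        = ex_warning,
        .warning_symbol = ex_warning_symbol,
        .stack          = ex_stack,
        .address        = ex_address,
};

static void ex_snapshot(void)
{
        struct ex_buf buf = { .nr = 0 };
        unsigned long *sp = (unsigned long *)current_stack_pointer;

        /* collect the first few kernel return addresses on our stack */
        dump_trace(current, NULL, sp, &ex_ops, &buf);
}
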
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile_32 | 2 +- arch/sh/kernel/dumpstack.c | 128 ++++++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/stacktrace.c | 87 +++++++++++++++++++++--------- arch/sh/kernel/traps_32.c | 24 --------- 4 files changed, 190 insertions(+), 51 deletions(-) create mode 100644 arch/sh/kernel/dumpstack.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 94ed99b6800..6b32de701a7 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -9,7 +9,7 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg endif -obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \ +obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ traps.o traps_32.o diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c new file mode 100644 index 00000000000..6ab996fc612 --- /dev/null +++ b/arch/sh/kernel/dumpstack.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + * Copyright (C) 2009 Matt Fleming + */ +#include +#include +#include + +#include + +void printk_address(unsigned long address, int reliable) +{ + printk(" [<%p>] %s%pS\n", (void *) address, + reliable ? "" : "? ", (void *) address); +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ + struct task_struct *task = tinfo->task; + unsigned long ret_addr; + int index = task->curr_ret_stack; + + if (addr != (unsigned long)return_to_handler) + return; + + if (!task->ret_stack || index < *graph) + return; + + index -= *graph; + ret_addr = task->ret_stack[index].ret; + + ops->address(data, ret_addr, 1); + + (*graph)++; +} +#else +static inline void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ } +#endif + +/* + * Unwind the call stack and pass information to the stacktrace_ops + * functions. + */ +void dump_trace(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) +{ + struct thread_info *context; + int graph = 0; + + context = (struct thread_info *) + ((unsigned long)sp & (~(THREAD_SIZE - 1))); + + while (!kstack_end(sp)) { + unsigned long addr = *sp++; + + if (__kernel_text_address(addr)) { + ops->address(data, addr, 0); + + print_ftrace_graph_addr(addr, data, ops, + context, &graph); + } + } +} +EXPORT_SYMBOL(dump_trace); + + +static void +print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + printk(data); + print_symbol(msg, symbol); + printk("\n"); +} + +static void print_trace_warning(void *data, char *msg) +{ + printk("%s%s\n", (char *)data, msg); +} + +static int print_trace_stack(void *data, char *name) +{ + printk("%s <%s> ", (char *)data, name); + return 0; +} + +/* + * Print one address/symbol entries per line. 
+ */ +static void print_trace_address(void *data, unsigned long addr, int reliable) +{ + printk(data); + printk_address(addr, reliable); +} + +static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, + .stack = print_trace_stack, + .address = print_trace_address, +}; + +void show_trace(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs) +{ + if (regs && user_mode(regs)) + return; + + printk("\nCall trace:\n"); + + dump_trace(tsk, regs, sp, &print_trace_ops, ""); + + printk("\n"); + + if (!tsk) + tsk = current; + + debug_show_held_locks(tsk); +} diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 1a2a5eb76e4..6c24a400b05 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -14,46 +14,81 @@ #include #include #include +#include + +static void save_stack_warning(void *data, char *msg) +{ +} + +static void +save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) +{ +} + +static int save_stack_stack(void *data, char *name) +{ + return 0; +} /* * Save stack-backtrace addresses into a stack_trace buffer. */ +static void save_stack_address(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = data; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +static const struct stacktrace_ops save_stack_ops = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, + .stack = save_stack_stack, + .address = save_stack_address, +}; + void save_stack_trace(struct stack_trace *trace) { unsigned long *sp = (unsigned long *)current_stack_pointer; - while (!kstack_end(sp)) { - unsigned long addr = *sp++; - - if (__kernel_text_address(addr)) { - if (trace->skip > 0) - trace->skip--; - else - trace->entries[trace->nr_entries++] = addr; - if (trace->nr_entries >= trace->max_entries) - break; - } - } + dump_trace(current, NULL, sp, &save_stack_ops, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); +static void +save_stack_address_nosched(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = (struct stack_trace *)data; + + if (in_sched_functions(addr)) + return; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +static const struct stacktrace_ops save_stack_ops_nosched = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, + .stack = save_stack_stack, + .address = save_stack_address_nosched, +}; + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = (unsigned long *)tsk->thread.sp; - while (!kstack_end(sp)) { - unsigned long addr = *sp++; - - if (__kernel_text_address(addr)) { - if (in_sched_functions(addr)) - break; - if (trace->skip > 0) - trace->skip--; - else - trace->entries[trace->nr_entries++] = addr; - if (trace->nr_entries >= trace->max_entries) - break; - } - } + dump_trace(current, NULL, sp, &save_stack_ops_nosched, trace); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 2b772776fcd..563426487c6 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -858,30 +858,6 @@ void __init trap_init(void) per_cpu_trap_init(); } -void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - 
unsigned long addr; - - if (regs && user_mode(regs)) - return; - - printk("\nCall trace:\n"); - - while (!kstack_end(sp)) { - addr = *sp++; - if (kernel_text_address(addr)) - print_ip_sym(addr); - } - - printk("\n"); - - if (!tsk) - tsk = current; - - debug_show_held_locks(tsk); -} - void show_stack(struct task_struct *tsk, unsigned long *sp) { unsigned long stack; -- cgit v1.2.3 From bf61ad1f870be88676a07bfef69acd59ce10172e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 13 Aug 2009 19:49:03 +0900 Subject: sh: Allow multiple stack unwinders to be setup Provide an interface for registering stack unwinders, where each unwinder is given a rating that describes its accuracy and complexity. The more accurate an unwinder is, the more complex it is. If a the current stack unwinder faults, then the stack unwinder with the next highest accuracy will be used in its place (provided one is available). For example, this allows unwinders, such as the DWARF unwinder, to liberally sprinkle BUG()s to catch badly formed DWARF debug info. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile_32 | 4 +- arch/sh/kernel/Makefile_64 | 2 +- arch/sh/kernel/unwinder.c | 162 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 165 insertions(+), 3 deletions(-) create mode 100644 arch/sh/kernel/unwinder.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 6b32de701a7..37a3b7704fc 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -11,8 +11,8 @@ endif obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ - sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ - traps.o traps_32.o + sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ + traps.o traps_32.o unwinder.o obj-y += cpu/ obj-$(CONFIG_VSYSCALL) += vsyscall/ diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 67b9f6c6326..00b73e7598c 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -2,7 +2,7 @@ extra-y := head_64.o init_task.o vmlinux.lds obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \ ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ - syscalls_64.o time.o topology.o traps.o traps_64.o + syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o obj-y += cpu/ obj-$(CONFIG_SMP) += smp.o diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c new file mode 100644 index 00000000000..2b30fa28b44 --- /dev/null +++ b/arch/sh/kernel/unwinder.c @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * Based, in part, on kernel/time/clocksource.c. + * + * This file provides arbitration code for stack unwinders. + * + * Multiple stack unwinders can be available on a system, usually with + * the most accurate unwinder being the currently active one. + */ +#include +#include +#include +#include +#include + +/* + * This is the most basic stack unwinder an architecture can + * provide. For architectures without reliable frame pointers, e.g. + * RISC CPUs, it can be implemented by looking through the stack for + * addresses that lie within the kernel text section. + * + * Other CPUs, e.g. x86, can use their frame pointer register to + * construct more accurate stack traces. 
+ */ +static struct list_head unwinder_list; +static struct unwinder stack_reader = { + .name = "stack-reader", + .dump = stack_reader_dump, + .rating = 50, + .list = { + .next = &unwinder_list, + .prev = &unwinder_list, + }, +}; + +/* + * "curr_unwinder" points to the stack unwinder currently in use. This + * is the unwinder with the highest rating. + * + * "unwinder_list" is a linked-list of all available unwinders, sorted + * by rating. + * + * All modifications of "curr_unwinder" and "unwinder_list" must be + * performed whilst holding "unwinder_lock". + */ +static struct unwinder *curr_unwinder = &stack_reader; + +static struct list_head unwinder_list = { + .next = &stack_reader.list, + .prev = &stack_reader.list, +}; + +static DEFINE_SPINLOCK(unwinder_lock); + +static atomic_t unwinder_running = ATOMIC_INIT(0); + +/** + * select_unwinder - Select the best registered stack unwinder. + * + * Private function. Must hold unwinder_lock when called. + * + * Select the stack unwinder with the best rating. This is useful for + * setting up curr_unwinder. + */ +static struct unwinder *select_unwinder(void) +{ + struct unwinder *best; + + if (list_empty(&unwinder_list)) + return NULL; + + best = list_entry(unwinder_list.next, struct unwinder, list); + if (best == curr_unwinder) + return NULL; + + return best; +} + +/* + * Enqueue the stack unwinder sorted by rating. + */ +static int unwinder_enqueue(struct unwinder *ops) +{ + struct list_head *tmp, *entry = &unwinder_list; + + list_for_each(tmp, &unwinder_list) { + struct unwinder *o; + + o = list_entry(tmp, struct unwinder, list); + if (o == ops) + return -EBUSY; + /* Keep track of the place, where to insert */ + if (o->rating >= ops->rating) + entry = tmp; + } + list_add(&ops->list, entry); + + return 0; +} + +/** + * unwinder_register - Used to install new stack unwinder + * @u: unwinder to be registered + * + * Install the new stack unwinder on the unwinder list, which is sorted + * by rating. + * + * Returns -EBUSY if registration fails, zero otherwise. + */ +int unwinder_register(struct unwinder *u) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&unwinder_lock, flags); + ret = unwinder_enqueue(u); + if (!ret) + curr_unwinder = select_unwinder(); + spin_unlock_irqrestore(&unwinder_lock, flags); + + return ret; +} + +/* + * Unwind the call stack and pass information to the stacktrace_ops + * functions. Also handle the case where we need to switch to a new + * stack dumper because the current one faulted unexpectedly. + */ +void unwind_stack(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) +{ + unsigned long flags; + + /* + * The problem with unwinders with high ratings is that they are + * inherently more complicated than the simple ones with lower + * ratings. We are therefore more likely to fault in the + * complicated ones, e.g. hitting BUG()s. If we fault in the + * code for the current stack unwinder we try to downgrade to + * one with a lower rating. + * + * Hopefully this will give us a semi-reliable stacktrace so we + * can diagnose why curr_unwinder->dump() faulted. 
+ */ + if (atomic_inc_return(&unwinder_running) != 1) { + spin_lock_irqsave(&unwinder_lock, flags); + + if (!list_is_singular(&unwinder_list)) { + list_del(&curr_unwinder->list); + curr_unwinder = select_unwinder(); + } + + spin_unlock_irqrestore(&unwinder_lock, flags); + atomic_dec(&unwinder_running); + } + + curr_unwinder->dump(task, regs, sp, ops, data); + + atomic_dec(&unwinder_running); +} -- cgit v1.2.3 From 0eff9f66de79a0707a9c3a2f8528ccfd62100f0b Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 11 Aug 2009 22:43:20 +0100 Subject: sh: Use the new stack unwinder API Instead of implementing our own stack unwinder via dump_trace() we should use the new stack unwinder API because it is more modular. This change allows us to decouple the interface for generating stacktraces from the implementation of a stack unwinder. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/dumpstack.c | 17 ++++++----------- arch/sh/kernel/stacktrace.c | 5 +++-- 2 files changed, 9 insertions(+), 13 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 6ab996fc612..005dc1d1146 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -6,7 +6,7 @@ #include #include #include - +#include #include void printk_address(unsigned long address, int reliable) @@ -46,13 +46,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data, { } #endif -/* - * Unwind the call stack and pass information to the stacktrace_ops - * functions. - */ -void dump_trace(struct task_struct *task, struct pt_regs *regs, - unsigned long *sp, const struct stacktrace_ops *ops, - void *data) +void +stack_reader_dump(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) { struct thread_info *context; int graph = 0; @@ -71,8 +68,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, } } } -EXPORT_SYMBOL(dump_trace); - static void print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) @@ -117,7 +112,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, printk("\nCall trace:\n"); - dump_trace(tsk, regs, sp, &print_trace_ops, ""); + unwind_stack(tsk, regs, sp, &print_trace_ops, ""); printk("\n"); diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 6c24a400b05..45b1adde3ab 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -57,7 +58,7 @@ void save_stack_trace(struct stack_trace *trace) { unsigned long *sp = (unsigned long *)current_stack_pointer; - dump_trace(current, NULL, sp, &save_stack_ops, trace); + unwind_stack(current, NULL, sp, &save_stack_ops, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); @@ -89,6 +90,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = (unsigned long *)tsk->thread.sp; - dump_trace(current, NULL, sp, &save_stack_ops_nosched, trace); + unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); -- cgit v1.2.3 From bd353861c735b2265c9d8b2559960c693e7c68ab Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 14 Aug 2009 01:58:43 +0900 Subject: sh: dwarf unwinder support. This is a first cut at a generic DWARF unwinder for the kernel. 
It's still lacking DWARF64 support and the DWARF expression support hasn't been tested very well but it is generating proper stacktraces on SH for WARN_ON() and NULL dereferences. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/Makefile_64 | 1 + arch/sh/kernel/dwarf.c | 876 +++++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/irq.c | 4 + arch/sh/kernel/vmlinux.lds.S | 4 +- 5 files changed, 885 insertions(+), 1 deletion(-) create mode 100644 arch/sh/kernel/dwarf.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 37a3b7704fc..f2245ebf0b3 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -33,6 +33,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 00b73e7598c..639ee514266 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -13,6 +13,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c new file mode 100644 index 00000000000..09c6fd7fd05 --- /dev/null +++ b/arch/sh/kernel/dwarf.c @@ -0,0 +1,876 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * This is an implementation of a DWARF unwinder. Its main purpose is + * for generating stacktrace information. Based on the DWARF 3 + * specification from http://www.dwarfstd.org. + * + * TODO: + * - DWARF64 doesn't work. + */ + +/* #define DEBUG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(dwarf_cie_list); +DEFINE_SPINLOCK(dwarf_cie_lock); + +static LIST_HEAD(dwarf_fde_list); +DEFINE_SPINLOCK(dwarf_fde_lock); + +static struct dwarf_cie *cached_cie; + +/* + * Figure out whether we need to allocate some dwarf registers. If dwarf + * registers have already been allocated then we may need to realloc + * them. "reg" is a register number that we need to be able to access + * after this call. + * + * Register numbers start at zero, therefore we need to allocate space + * for "reg" + 1 registers. + */ +static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, + unsigned int reg) +{ + struct dwarf_reg *regs; + unsigned int num_regs = reg + 1; + size_t new_size; + size_t old_size; + + new_size = num_regs * sizeof(*regs); + old_size = frame->num_regs * sizeof(*regs); + + /* Fast path: don't allocate any regs if we've already got enough. */ + if (frame->num_regs >= num_regs) + return; + + regs = kzalloc(new_size, GFP_KERNEL); + if (!regs) { + printk(KERN_WARNING "Unable to allocate DWARF registers\n"); + /* + * Let's just bomb hard here, we have no way to + * gracefully recover. 
+ */ + BUG(); + } + + if (frame->regs) { + memcpy(regs, frame->regs, old_size); + kfree(frame->regs); + } + + frame->regs = regs; + frame->num_regs = num_regs; +} + +/** + * dwarf_read_addr - read dwarf data + * @src: source address of data + * @dst: destination address to store the data to + * + * Read 'n' bytes from @src, where 'n' is the size of an address on + * the native machine. We return the number of bytes read, which + * should always be 'n'. We also have to be careful when reading + * from @src and writing to @dst, because they can be arbitrarily + * aligned. Return 'n' - the number of bytes read. + */ +static inline int dwarf_read_addr(void *src, void *dst) +{ + u32 val = __get_unaligned_cpu32(src); + __put_unaligned_cpu32(val, dst); + + return sizeof(unsigned long *); +} + +/** + * dwarf_read_uleb128 - read unsigned LEB128 data + * @addr: the address where the ULEB128 data is stored + * @ret: address to store the result + * + * Decode an unsigned LEB128 encoded datum. The algorithm is taken + * from Appendix C of the DWARF 3 spec. For information on the + * encodings refer to section "7.6 - Variable Length Data". Return + * the number of bytes read. + */ +static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret) +{ + unsigned int result; + unsigned char byte; + int shift, count; + + result = 0; + shift = 0; + count = 0; + + while (1) { + byte = __raw_readb(addr); + addr++; + count++; + + result |= (byte & 0x7f) << shift; + shift += 7; + + if (!(byte & 0x80)) + break; + } + + *ret = result; + + return count; +} + +/** + * dwarf_read_leb128 - read signed LEB128 data + * @addr: the address of the LEB128 encoded data + * @ret: address to store the result + * + * Decode signed LEB128 data. The algorithm is taken from Appendix + * C of the DWARF 3 spec. Return the number of bytes read. + */ +static inline unsigned long dwarf_read_leb128(char *addr, int *ret) +{ + unsigned char byte; + int result, shift; + int num_bits; + int count; + + result = 0; + shift = 0; + count = 0; + + while (1) { + byte = __raw_readb(addr); + addr++; + result |= (byte & 0x7f) << shift; + shift += 7; + count++; + + if (!(byte & 0x80)) + break; + } + + /* The number of bits in a signed integer. */ + num_bits = 8 * sizeof(result); + + if ((shift < num_bits) && (byte & 0x40)) + result |= (-1 << shift); + + *ret = result; + + return count; +} + +/** + * dwarf_read_encoded_value - return the decoded value at @addr + * @addr: the address of the encoded value + * @val: where to write the decoded value + * @encoding: the encoding with which we can decode @addr + * + * GCC emits encoded address in the .eh_frame FDE entries. Decode + * the value at @addr using @encoding. The decoded value is written + * to @val and the number of bytes read is returned. 
+ */ +static int dwarf_read_encoded_value(char *addr, unsigned long *val, + char encoding) +{ + unsigned long decoded_addr = 0; + int count = 0; + + switch (encoding & 0x70) { + case DW_EH_PE_absptr: + break; + case DW_EH_PE_pcrel: + decoded_addr = (unsigned long)addr; + break; + default: + pr_debug("encoding=0x%x\n", (encoding & 0x70)); + BUG(); + } + + if ((encoding & 0x07) == 0x00) + encoding |= DW_EH_PE_udata4; + + switch (encoding & 0x0f) { + case DW_EH_PE_sdata4: + case DW_EH_PE_udata4: + count += 4; + decoded_addr += __get_unaligned_cpu32(addr); + __raw_writel(decoded_addr, val); + break; + default: + pr_debug("encoding=0x%x\n", encoding); + BUG(); + } + + return count; +} + +/** + * dwarf_entry_len - return the length of an FDE or CIE + * @addr: the address of the entry + * @len: the length of the entry + * + * Read the initial_length field of the entry and store the size of + * the entry in @len. We return the number of bytes read. Return a + * count of 0 on error. + */ +static inline int dwarf_entry_len(char *addr, unsigned long *len) +{ + u32 initial_len; + int count; + + initial_len = __get_unaligned_cpu32(addr); + count = 4; + + /* + * An initial length field value in the range DW_LEN_EXT_LO - + * DW_LEN_EXT_HI indicates an extension, and should not be + * interpreted as a length. The only extension that we currently + * understand is the use of DWARF64 addresses. + */ + if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) { + /* + * The 64-bit length field immediately follows the + * compulsory 32-bit length field. + */ + if (initial_len == DW_EXT_DWARF64) { + *len = __get_unaligned_cpu64(addr + 4); + count = 12; + } else { + printk(KERN_WARNING "Unknown DWARF extension\n"); + count = 0; + } + } else + *len = initial_len; + + return count; +} + +/** + * dwarf_lookup_cie - locate the cie + * @cie_ptr: pointer to help with lookup + */ +static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) +{ + struct dwarf_cie *cie, *n; + unsigned long flags; + + spin_lock_irqsave(&dwarf_cie_lock, flags); + + /* + * We've cached the last CIE we looked up because chances are + * that the FDE wants this CIE. + */ + if (cached_cie && cached_cie->cie_pointer == cie_ptr) { + cie = cached_cie; + goto out; + } + + list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) { + if (cie->cie_pointer == cie_ptr) { + cached_cie = cie; + break; + } + } + + /* Couldn't find the entry in the list. */ + if (&cie->link == &dwarf_cie_list) + cie = NULL; +out: + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + return cie; +} + +/** + * dwarf_lookup_fde - locate the FDE that covers pc + * @pc: the program counter + */ +struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) +{ + unsigned long flags; + struct dwarf_fde *fde, *n; + + spin_lock_irqsave(&dwarf_fde_lock, flags); + list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) { + unsigned long start, end; + + start = fde->initial_location; + end = fde->initial_location + fde->address_range; + + if (pc >= start && pc < end) + break; + } + + /* Couldn't find the entry in the list. 
*/ + if (&fde->link == &dwarf_fde_list) + fde = NULL; + + spin_unlock_irqrestore(&dwarf_fde_lock, flags); + + return fde; +} + +/** + * dwarf_cfa_execute_insns - execute instructions to calculate a CFA + * @insn_start: address of the first instruction + * @insn_end: address of the last instruction + * @cie: the CIE for this function + * @fde: the FDE for this function + * @frame: the instructions calculate the CFA for this frame + * @pc: the program counter of the address we're interested in + * + * Execute the Call Frame instruction sequence starting at + * @insn_start and ending at @insn_end. The instructions describe + * how to calculate the Canonical Frame Address of a stackframe. + * Store the results in @frame. + */ +static int dwarf_cfa_execute_insns(unsigned char *insn_start, + unsigned char *insn_end, + struct dwarf_cie *cie, + struct dwarf_fde *fde, + struct dwarf_frame *frame, + unsigned long pc) +{ + unsigned char insn; + unsigned char *current_insn; + unsigned int count, delta, reg, expr_len, offset; + + current_insn = insn_start; + + while (current_insn < insn_end && frame->pc <= pc) { + insn = __raw_readb(current_insn++); + + /* + * Firstly, handle the opcodes that embed their operands + * in the instructions. + */ + switch (DW_CFA_opcode(insn)) { + case DW_CFA_advance_loc: + delta = DW_CFA_operand(insn); + delta *= cie->code_alignment_factor; + frame->pc += delta; + continue; + /* NOTREACHED */ + case DW_CFA_offset: + reg = DW_CFA_operand(insn); + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + offset *= cie->data_alignment_factor; + dwarf_frame_alloc_regs(frame, reg); + frame->regs[reg].addr = offset; + frame->regs[reg].flags |= DWARF_REG_OFFSET; + continue; + /* NOTREACHED */ + case DW_CFA_restore: + reg = DW_CFA_operand(insn); + continue; + /* NOTREACHED */ + } + + /* + * Secondly, handle the opcodes that don't embed their + * operands in the instruction. 
+ */ + switch (insn) { + case DW_CFA_nop: + continue; + case DW_CFA_advance_loc1: + delta = *current_insn++; + frame->pc += delta * cie->code_alignment_factor; + break; + case DW_CFA_advance_loc2: + delta = __get_unaligned_cpu16(current_insn); + current_insn += 2; + frame->pc += delta * cie->code_alignment_factor; + break; + case DW_CFA_advance_loc4: + delta = __get_unaligned_cpu32(current_insn); + current_insn += 4; + frame->pc += delta * cie->code_alignment_factor; + break; + case DW_CFA_offset_extended: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + offset *= cie->data_alignment_factor; + break; + case DW_CFA_restore_extended: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + break; + case DW_CFA_undefined: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + break; + case DW_CFA_def_cfa: + count = dwarf_read_uleb128(current_insn, + &frame->cfa_register); + current_insn += count; + count = dwarf_read_uleb128(current_insn, + &frame->cfa_offset); + current_insn += count; + + frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; + break; + case DW_CFA_def_cfa_register: + count = dwarf_read_uleb128(current_insn, + &frame->cfa_register); + current_insn += count; + frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; + break; + case DW_CFA_def_cfa_offset: + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + frame->cfa_offset = offset; + break; + case DW_CFA_def_cfa_expression: + count = dwarf_read_uleb128(current_insn, &expr_len); + current_insn += count; + + frame->cfa_expr = current_insn; + frame->cfa_expr_len = expr_len; + current_insn += expr_len; + + frame->flags |= DWARF_FRAME_CFA_REG_EXP; + break; + case DW_CFA_offset_extended_sf: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_leb128(current_insn, &offset); + current_insn += count; + offset *= cie->data_alignment_factor; + dwarf_frame_alloc_regs(frame, reg); + frame->regs[reg].flags |= DWARF_REG_OFFSET; + frame->regs[reg].addr = offset; + break; + case DW_CFA_val_offset: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_leb128(current_insn, &offset); + offset *= cie->data_alignment_factor; + frame->regs[reg].flags |= DWARF_REG_OFFSET; + frame->regs[reg].addr = offset; + break; + default: + pr_debug("unhandled DWARF instruction 0x%x\n", insn); + break; + } + } + + return 0; +} + +/** + * dwarf_unwind_stack - recursively unwind the stack + * @pc: address of the function to unwind + * @prev: struct dwarf_frame of the previous stackframe on the callstack + * + * Return a struct dwarf_frame representing the most recent frame + * on the callstack. Each of the lower (older) stack frames are + * linked via the "prev" member. + */ +struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, + struct dwarf_frame *prev) +{ + struct dwarf_frame *frame; + struct dwarf_cie *cie; + struct dwarf_fde *fde; + unsigned long addr; + int i, offset; + + /* + * If this is the first invocation of this recursive function we + * need get the contents of a physical register to get the CFA + * in order to begin the virtual unwinding of the stack. + * + * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of + * this function because the return address register + * (DWARF_ARCH_RA_REG) will probably not be initialised until a + * few instructions into the prologue. 
+ */ + if (!pc && !prev) { + pc = (unsigned long)&dwarf_unwind_stack; + pc += DWARF_ARCH_UNWIND_OFFSET; + } + + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + return NULL; + + frame->prev = prev; + + fde = dwarf_lookup_fde(pc); + if (!fde) { + /* + * This is our normal exit path - the one that stops the + * recursion. There are two reasons why we might exit + * here, + * + * a) pc has no associated DWARF frame info and so + * we don't know how to unwind this frame. This is + * usually the case when we're trying to unwind a + * frame that was called from some assembly code + * that has no DWARF info, e.g. syscalls. + * + * b) the DEBUG info for pc is bogus. There's + * really no way to distinguish this case from the + * case above, which sucks because we could print a + * warning here. + */ + return NULL; + } + + cie = dwarf_lookup_cie(fde->cie_pointer); + + frame->pc = fde->initial_location; + + /* CIE initial instructions */ + dwarf_cfa_execute_insns(cie->initial_instructions, + cie->instructions_end, cie, fde, frame, pc); + + /* FDE instructions */ + dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, + fde, frame, pc); + + /* Calculate the CFA */ + switch (frame->flags) { + case DWARF_FRAME_CFA_REG_OFFSET: + if (prev) { + BUG_ON(!prev->regs[frame->cfa_register].flags); + + addr = prev->cfa; + addr += prev->regs[frame->cfa_register].addr; + frame->cfa = __raw_readl(addr); + + } else { + /* + * Again, this is the first invocation of this + * recursive function. We need to physically + * read the contents of a register in order to + * get the Canonical Frame Address for this + * function. + */ + frame->cfa = dwarf_read_arch_reg(frame->cfa_register); + } + + frame->cfa += frame->cfa_offset; + break; + default: + BUG(); + } + + /* If we haven't seen the return address reg, we're screwed. */ + BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags); + + for (i = 0; i <= frame->num_regs; i++) { + struct dwarf_reg *reg = &frame->regs[i]; + + if (!reg->flags) + continue; + + offset = reg->addr; + offset += frame->cfa; + } + + addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr; + frame->return_addr = __raw_readl(addr); + + frame->next = dwarf_unwind_stack(frame->return_addr, frame); + return frame; +} + +static int dwarf_parse_cie(void *entry, void *p, unsigned long len, + unsigned char *end) +{ + struct dwarf_cie *cie; + unsigned long flags; + int count; + + cie = kzalloc(sizeof(*cie), GFP_KERNEL); + if (!cie) + return -ENOMEM; + + cie->length = len; + + /* + * Record the offset into the .eh_frame section + * for this CIE. It allows this CIE to be + * quickly and easily looked up from the + * corresponding FDE. + */ + cie->cie_pointer = (unsigned long)entry; + + cie->version = *(char *)p++; + BUG_ON(cie->version != 1); + + cie->augmentation = p; + p += strlen(cie->augmentation) + 1; + + count = dwarf_read_uleb128(p, &cie->code_alignment_factor); + p += count; + + count = dwarf_read_leb128(p, &cie->data_alignment_factor); + p += count; + + /* + * Which column in the rule table contains the + * return address?
+ */ + if (cie->version == 1) { + cie->return_address_reg = __raw_readb(p); + p++; + } else { + count = dwarf_read_uleb128(p, &cie->return_address_reg); + p += count; + } + + if (cie->augmentation[0] == 'z') { + unsigned int length, count; + cie->flags |= DWARF_CIE_Z_AUGMENTATION; + + count = dwarf_read_uleb128(p, &length); + p += count; + + BUG_ON((unsigned char *)p > end); + + cie->initial_instructions = p + length; + cie->augmentation++; + } + + while (*cie->augmentation) { + /* + * "L" indicates a byte showing how the + * LSDA pointer is encoded. Skip it. + */ + if (*cie->augmentation == 'L') { + p++; + cie->augmentation++; + } else if (*cie->augmentation == 'R') { + /* + * "R" indicates a byte showing + * how FDE addresses are + * encoded. + */ + cie->encoding = *(char *)p++; + cie->augmentation++; + } else if (*cie->augmentation == 'P') { + /* + * "P" indicates a personality + * routine in the CIE + * augmentation. + */ + BUG(); + } else if (*cie->augmentation == 'S') { + BUG(); + } else { + /* + * Unknown augmentation. Assume + * 'z' augmentation. + */ + p = cie->initial_instructions; + BUG_ON(!p); + break; + } + } + + cie->initial_instructions = p; + cie->instructions_end = end; + + /* Add to list */ + spin_lock_irqsave(&dwarf_cie_lock, flags); + list_add_tail(&cie->link, &dwarf_cie_list); + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + + return 0; +} + +static int dwarf_parse_fde(void *entry, u32 entry_type, + void *start, unsigned long len) +{ + struct dwarf_fde *fde; + struct dwarf_cie *cie; + unsigned long flags; + int count; + void *p = start; + + fde = kzalloc(sizeof(*fde), GFP_KERNEL); + if (!fde) + return -ENOMEM; + + fde->length = len; + + /* + * In a .eh_frame section the CIE pointer is the + * delta between the address of the CIE pointer + * field within the FDE and the CIE itself, so + * convert it back into a direct pointer. + */ + fde->cie_pointer = (unsigned long)(p - entry_type - 4); + + cie = dwarf_lookup_cie(fde->cie_pointer); + fde->cie = cie; + + if (cie->encoding) + count = dwarf_read_encoded_value(p, &fde->initial_location, + cie->encoding); + else + count = dwarf_read_addr(p, &fde->initial_location); + + p += count; + + if (cie->encoding) + count = dwarf_read_encoded_value(p, &fde->address_range, + cie->encoding & 0x0f); + else + count = dwarf_read_addr(p, &fde->address_range); + + p += count; + + if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) { + unsigned int length; + count = dwarf_read_uleb128(p, &length); + p += count + length; + } + + /* Call frame instructions. */ + fde->instructions = p; + fde->end = start + len; + + /* Add to list. */ + spin_lock_irqsave(&dwarf_fde_lock, flags); + list_add_tail(&fde->link, &dwarf_fde_list); + spin_unlock_irqrestore(&dwarf_fde_lock, flags); + + return 0; +} + +static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, + const struct stacktrace_ops *ops, void *data) +{ + struct dwarf_frame *frame; + + frame = dwarf_unwind_stack(0, NULL); + + while (frame && frame->return_addr) { + ops->address(data, frame->return_addr, 1); + frame = frame->next; + } +} + +static struct unwinder dwarf_unwinder = { + .name = "dwarf-unwinder", + .dump = dwarf_unwinder_dump, + .rating = 150, +}; + +static void dwarf_unwinder_cleanup(void) +{ + struct dwarf_cie *cie, *m; + struct dwarf_fde *fde, *n; + unsigned long flags; + + /* + * Deallocate all the memory allocated for the DWARF unwinder. + * Traverse all the FDE/CIE lists and remove and free all the + * memory associated with those data structures.
+ */ + spin_lock_irqsave(&dwarf_cie_lock, flags); + list_for_each_entry_safe(cie, m, &dwarf_cie_list, link) + kfree(cie); + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + + spin_lock_irqsave(&dwarf_fde_lock, flags); + list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) + kfree(fde); + spin_unlock_irqrestore(&dwarf_fde_lock, flags); +} + +/** + * dwarf_unwinder_init - initialise the dwarf unwinder + * + * Build the data structures describing the .dwarf_frame section to + * make it easier to lookup CIE and FDE entries. Because the + * .eh_frame section is packed as tightly as possible it is not + * easy to lookup the FDE for a given PC, so we build a list of FDE + * and CIE entries that make it easier. + */ +void dwarf_unwinder_init(void) +{ + u32 entry_type; + void *p, *entry; + int count, err; + unsigned long len; + unsigned int c_entries, f_entries; + unsigned char *end; + INIT_LIST_HEAD(&dwarf_cie_list); + INIT_LIST_HEAD(&dwarf_fde_list); + + c_entries = 0; + f_entries = 0; + entry = &__start_eh_frame; + + while ((char *)entry < __stop_eh_frame) { + p = entry; + + count = dwarf_entry_len(p, &len); + if (count == 0) { + /* + * We read a bogus length field value. There is + * nothing we can do here apart from disabling + * the DWARF unwinder. We can't even skip this + * entry and move to the next one because 'len' + * tells us where our next entry is. + */ + goto out; + } else + p += count; + + /* initial length does not include itself */ + end = p + len; + + entry_type = __get_unaligned_cpu32(p); + p += 4; + + if (entry_type == DW_EH_FRAME_CIE) { + err = dwarf_parse_cie(entry, p, len, end); + if (err < 0) + goto out; + else + c_entries++; + } else { + err = dwarf_parse_fde(entry, entry_type, p, len); + if (err < 0) + goto out; + else + f_entries++; + } + + entry = (char *)entry + len + 4; + } + + printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", + c_entries, f_entries); + + err = unwinder_register(&dwarf_unwinder); + if (err) + goto out; + + return; + +out: + printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); + dwarf_unwinder_cleanup(); +} diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 278c68c6048..2bb43dc74f2 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -261,6 +262,9 @@ void __init init_IRQ(void) sh_mv.mv_init_irq(); irq_ctx_init(smp_processor_id()); + + /* This needs to be early, but not too early.. */ + dwarf_unwinder_init(); } #ifdef CONFIG_SPARSE_IRQ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 80dc9f8d941..1b7d9d541e0 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -12,7 +12,7 @@ OUTPUT_ARCH(sh) #include #include -#include +#include ENTRY(_start) SECTIONS @@ -70,6 +70,8 @@ SECTIONS _edata = .; /* End of data section */ + DWARF_EH_FRAME + . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) -- cgit v1.2.3 From 0b930489b8606224b829c8a6037eac24249a97ec Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 2 Aug 2009 22:33:26 +0100 Subject: sh: Setup the frame register in asm code In order to use DWARF unwinder info the frame register has to contain a valid value. Whilst GCC takes care of this for C code, we have to do it ourselves for assembly. 
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 1 + arch/sh/kernel/entry-common.S | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 3cb531f233f..67ad6467c69 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -137,6 +137,7 @@ ENTRY(tlb_protection_violation_store) mov #1, r5 call_dpf: + setup_frame_reg mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index fc26ccd8278..b3b215fb21f 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -285,6 +285,7 @@ ret_from_fork: * system calls and debug traps through their respective jump tables. */ ENTRY(system_call) + setup_frame_reg #if !defined(CONFIG_CPU_SH2) mov.l 1f, r9 mov.l @r9, r8 ! Read from TRA (Trap Address) Register -- cgit v1.2.3 From cafb0ddac60556f7d2d4cd0ef1a93da8a6c71ffb Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 2 Aug 2009 22:40:11 +0100 Subject: sh: Add CFI annotations for exception return. Annotate various assembly code paths with CFI assembler directives so that DWARF unwind info is available for the unwinder. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/entry-common.S | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index b3b215fb21f..e63178fefb9 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -43,6 +43,7 @@ * syscall # * */ +#include #if defined(CONFIG_PREEMPT) # define preempt_stop() cli ; TRACE_IRQS_OFF @@ -66,6 +67,11 @@ ENTRY(exception_error) .align 2 ret_from_exception: + CFI_STARTPROC simple + CFI_DEF_CFA r14, 0 + CFI_REL_OFFSET 17, 64 + CFI_REL_OFFSET 15, 0 + CFI_REL_OFFSET 14, 56 preempt_stop() ENTRY(ret_from_irq) ! @@ -240,6 +246,7 @@ debug_trap: nop bra __restore_all nop + CFI_ENDPROC .align 2 1: .long debug_trap_table -- cgit v1.2.3 From 3497447f15485b479366ec86effaac16fc82411b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 14 Aug 2009 02:10:59 +0900 Subject: sh: unwinder: Fix up usage of unaligned accessors. This was using internal symbols for unaligned accesses, bypassing the exposed interface for variable sized safe accesses. This converts all of the __get_unaligned_cpuXX() users over to get_unaligned() directly, relying on the cast to select the proper internal routine. Additionally, the __put_unaligned_cpuXX() case is superfluous given that the destination address is aligned in all of the current cases, so just drop that outright. Furthermore, this switches to the asm/unaligned.h header instead of the asm-generic version, which was silently bypassing the SH-4A optimized unaligned ops. Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 09c6fd7fd05..d1d8536e5ba 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -87,11 +87,9 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, * from @src and writing to @dst, because they can be arbitrarily * aligned. Return 'n' - the number of bytes read. 
*/ -static inline int dwarf_read_addr(void *src, void *dst) +static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst) { - u32 val = __get_unaligned_cpu32(src); - __put_unaligned_cpu32(val, dst); - + *dst = get_unaligned(src); return sizeof(unsigned long *); } @@ -207,7 +205,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val, case DW_EH_PE_sdata4: case DW_EH_PE_udata4: count += 4; - decoded_addr += __get_unaligned_cpu32(addr); + decoded_addr += get_unaligned((u32 *)addr); __raw_writel(decoded_addr, val); break; default: @@ -232,7 +230,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) u32 initial_len; int count; - initial_len = __get_unaligned_cpu32(addr); + initial_len = get_unaligned((u32 *)addr); count = 4; /* @@ -247,7 +245,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) * compulsory 32-bit length field. */ if (initial_len == DW_EXT_DWARF64) { - *len = __get_unaligned_cpu64(addr + 4); + *len = get_unaligned((u64 *)addr + 4); count = 12; } else { printk(KERN_WARNING "Unknown DWARF extension\n"); @@ -392,12 +390,12 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, frame->pc += delta * cie->code_alignment_factor; break; case DW_CFA_advance_loc2: - delta = __get_unaligned_cpu16(current_insn); + delta = get_unaligned((u16 *)current_insn); current_insn += 2; frame->pc += delta * cie->code_alignment_factor; break; case DW_CFA_advance_loc4: - delta = __get_unaligned_cpu32(current_insn); + delta = get_unaligned((u32 *)current_insn); current_insn += 4; frame->pc += delta * cie->code_alignment_factor; break; @@ -841,7 +839,7 @@ void dwarf_unwinder_init(void) /* initial length does not include itself */ end = p + len; - entry_type = __get_unaligned_cpu32(p); + entry_type = get_unaligned((u32 *)p); p += 4; if (entry_type == DW_EH_FRAME_CIE) { -- cgit v1.2.3 From bf43a160ff2d67a21b076286bab6f5e2c993bd0a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 14 Aug 2009 03:06:13 +0900 Subject: sh: unwinder: Restore put_unaligned() for an unaligned destination. The destination address might be unaligned, so set it with put_unaligned() for safety. This restores the previous behaviour, albeit through the proper API. Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index d1d8536e5ba..49d039f1942 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -89,7 +89,8 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, */ static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst) { - *dst = get_unaligned(src); + u32 val = get_unaligned(src); + put_unaligned(val, dst); return sizeof(unsigned long *); } -- cgit v1.2.3 From f826466772ae52f26152287fcb2259351de78f0f Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 13 Aug 2009 20:41:31 +0100 Subject: sh: Delete DWARF_ARCH_UNWIND_OFFSET Trying to figure out the best value for DWARF_ARCH_UNWIND_OFFSET is tricky at best. Various things can change the size (and offset from the beginning of the function) of the prologue. Notably, turning on ftrace adds calls to mcount at the beginning of functions, thereby pushing the prologue further into the function. So replace DWARF_ARCH_UNWIND_OFFSET with some code that continues to execute CFA instructions until the value of return address register is defined. 
This is safe to do because we know that the return address must have been pushed onto the frame before our first function call; we just can't figure out where at compile-time. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 44 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 49d039f1942..83f3cc92549 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -330,6 +330,7 @@ struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) * @fde: the FDE for this function * @frame: the instructions calculate the CFA for this frame * @pc: the program counter of the address we're interested in + * @define_ra: keep executing insns until the return addr reg is defined? * * Execute the Call Frame instruction sequence starting at * @insn_start and ending at @insn_end. The instructions describe @@ -341,17 +342,36 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, struct dwarf_cie *cie, struct dwarf_fde *fde, struct dwarf_frame *frame, - unsigned long pc) + unsigned long pc, + bool define_ra) { unsigned char insn; unsigned char *current_insn; unsigned int count, delta, reg, expr_len, offset; + bool seen_ra_reg; current_insn = insn_start; - while (current_insn < insn_end && frame->pc <= pc) { + /* + * If we're executing instructions for the dwarf_unwind_stack() + * FDE we need to keep executing instructions until the value of + * DWARF_ARCH_RA_REG is defined. See the comment in + * dwarf_unwind_stack() for more details. + */ + if (define_ra) + seen_ra_reg = false; + else + seen_ra_reg = true; + + while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg) ) { insn = __raw_readb(current_insn++); + if (!seen_ra_reg) { + if (frame->num_regs >= DWARF_ARCH_RA_REG && + frame->regs[DWARF_ARCH_RA_REG].flags) + seen_ra_reg = true; + } + /* * Firstly, handle the opcodes that embed their operands * in the instructions. @@ -490,20 +510,25 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, struct dwarf_fde *fde; unsigned long addr; int i, offset; + bool define_ra = false; /* * If this is the first invocation of this recursive function we * need get the contents of a physical register to get the CFA * in order to begin the virtual unwinding of the stack. * - * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of - * this function because the return address register - * (DWARF_ARCH_RA_REG) will probably not be initialised until a - * few instructions into the prologue. + * Setting "define_ra" to true indicates that we want + * dwarf_cfa_execute_insns() to continue executing instructions + * until we know how to calculate the value of DWARF_ARCH_RA_REG + * (which we need in order to kick off the whole unwinding + * process). + * + * NOTE: the return address is guaranteed to be set up by the + * time this function makes its first function call.
*/ if (!pc && !prev) { pc = (unsigned long)&dwarf_unwind_stack; - pc += DWARF_ARCH_UNWIND_OFFSET; + define_ra = true; } frame = kzalloc(sizeof(*frame), GFP_KERNEL); @@ -539,11 +564,12 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, /* CIE initial instructions */ dwarf_cfa_execute_insns(cie->initial_instructions, - cie->instructions_end, cie, fde, frame, pc); + cie->instructions_end, cie, fde, + frame, pc, false); /* FDE instructions */ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, - fde, frame, pc); + fde, frame, pc, define_ra); /* Calculate the CFA */ switch (frame->flags) { -- cgit v1.2.3 From 0fc11e3618bb1f9e0640127ec84f5d2690fa3894 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 14 Aug 2009 23:58:37 +0900 Subject: sh: unwinder: Convert frame allocations to GFP_ATOMIC. save_stack_trace_tsk() and friends can be called from atomic context (as triggered by latencytop), and subsequently hit two problematic allocation points that were using GFP_KERNEL (these were dwarf_unwind_stack() and dwarf_frame_alloc_regs()). Convert these over to GFP_ATOMIC and get latencytop working with the DWARF unwinder. Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 83f3cc92549..db021361b16 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -57,7 +57,7 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, if (frame->num_regs >= num_regs) return; - regs = kzalloc(new_size, GFP_KERNEL); + regs = kzalloc(new_size, GFP_ATOMIC); if (!regs) { printk(KERN_WARNING "Unable to allocate DWARF registers\n"); /* @@ -531,7 +531,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, define_ra = true; } - frame = kzalloc(sizeof(*frame), GFP_KERNEL); + frame = kzalloc(sizeof(*frame), GFP_ATOMIC); if (!frame) return NULL; -- cgit v1.2.3 From 48e4d4604b7da52d6a56ddc821a460b183b7353d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 01:05:46 +0900 Subject: sh: stacktrace: Add reliability checks in address saving ops. This adopts the reliability checks from the x86 stacktrace code so known bad addresses are not recorded in the stack trace buffer. Signed-off-by: Paul Mundt --- arch/sh/kernel/stacktrace.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 45b1adde3ab..a0f497bcbb4 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -38,6 +38,9 @@ static void save_stack_address(void *data, unsigned long addr, int reliable) { struct stack_trace *trace = data; + if (!reliable) + return; + if (trace->skip > 0) { trace->skip--; return; @@ -67,6 +70,9 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) { struct stack_trace *trace = (struct stack_trace *)data; + if (!reliable) + return; + if (in_sched_functions(addr)) return; -- cgit v1.2.3 From f9967e23c10f025d85bbf66a2a5f18f016890ccb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 01:09:03 +0900 Subject: sh: flag the default unwinder as reliable. This flags the default unwinder as reliable, as it tends to be reliable enough for the purposes of the stacktrace buffer. We leave the unreliable cases for the unwind methods that we know to be completely broken. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/dumpstack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 005dc1d1146..6f5ad151340 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -61,7 +61,7 @@ stack_reader_dump(struct task_struct *task, struct pt_regs *regs, unsigned long addr = *sp++; if (__kernel_text_address(addr)) { - ops->address(data, addr, 0); + ops->address(data, addr, 1); print_ftrace_graph_addr(addr, data, ops, context, &graph); -- cgit v1.2.3 From 606b4c992f3b28f906100f1b6eb49059909d8da7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 01:11:37 +0900 Subject: sh: stacktrace: Properly terminate the trace entry buffer. This inserts a ULONG_MAX entry at the end of the valid entries in the stack trace buffer so the default code doesn't need to scan to the end of available slots. This also makes the trace buffer termination behaviour consistent with the other architectures. Signed-off-by: Paul Mundt --- arch/sh/kernel/stacktrace.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index a0f497bcbb4..c2e45c48409 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -62,6 +62,8 @@ void save_stack_trace(struct stack_trace *trace) unsigned long *sp = (unsigned long *)current_stack_pointer; unwind_stack(current, NULL, sp, &save_stack_ops, trace); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace); @@ -97,5 +99,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) unsigned long *sp = (unsigned long *)tsk->thread.sp; unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); -- cgit v1.2.3
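To illustrate the termination convention added by the last patch, here is a minimal sketch of how a caller of save_stack_trace() might walk the resulting buffer. It assumes the generic struct stack_trace layout from linux/stacktrace.h; the print_trace_entries() helper is hypothetical and not part of any patch above.

#include <linux/kernel.h>
#include <linux/stacktrace.h>

/*
 * Walk a trace buffer filled by save_stack_trace(). With the patch
 * above, a ULONG_MAX entry marks the end of the valid entries
 * whenever the buffer was not filled all the way to max_entries.
 */
static void print_trace_entries(struct stack_trace *trace)
{
	unsigned int i;

	for (i = 0; i < trace->nr_entries; i++) {
		if (trace->entries[i] == ULONG_MAX)
			break;
		printk(KERN_DEBUG "  [<%08lx>] %pS\n",
		       trace->entries[i], (void *)trace->entries[i]);
	}
}

Callers that want to drop the innermost frames can set trace->skip before calling save_stack_trace(); the save_stack_address() callbacks quoted earlier already honour that field.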