author    Frederic Weisbecker <fweisbec@gmail.com>    2008-10-31 13:20:08 +0100
committer Ingo Molnar <mingo@elte.hu>                 2008-11-04 17:14:06 +0100
commit    d7ad44b697c9d13e445ddc7d16f736fbac333249 (patch)
tree      a18ac8995bf7158835c69ca1c9ab9b674fc617fa /kernel/trace
parent    e55f605c14679c30be41473e60b7ad26524cdc35 (diff)
tracing/fastboot: use sched switch tracer from boot tracer
Impact: enhance boot trace output with scheduling events

Use the sched_switch tracer from the boot tracer. We can also trace
scheduling events inside the initcalls. Sched tracing is disabled after
an initcall has finished and then reenabled before the next one starts.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
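For readers following the mechanism, here is a minimal, illustrative sketch of how the boot path is expected to bracket each initcall with the enable/disable helpers added in trace_boot.c. The wrapper function and its placement are assumptions for illustration only; the actual caller lives outside kernel/trace and is not part of this diff.

/*
 * Hypothetical sketch -- not from this patch. Only enable_boot_trace()
 * and disable_boot_trace() come from kernel/trace/trace_boot.c; the
 * wrapper name and its location are assumed for illustration.
 */
#include <linux/init.h>

static int __init run_traced_initcall(initcall_t fn)
{
	int ret;

	enable_boot_trace();	/* resume sched_switch/cmdline recording */
	ret = fn();		/* context switches inside the initcall are traced */
	disable_boot_trace();	/* pause recording until the next initcall */

	return ret;
}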
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace.c               | 2
-rw-r--r--  kernel/trace/trace.h               | 1
-rw-r--r--  kernel/trace/trace_boot.c          | 6
-rw-r--r--  kernel/trace/trace_sched_switch.c  | 6
4 files changed, 12 insertions, 3 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e4c40c868d6..50d7018163f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3251,6 +3251,8 @@ __init static int tracer_alloc_buffers(void)
register_tracer(&nop_trace);
#ifdef CONFIG_BOOT_TRACER
+ /* We don't want to launch sched_switch tracer yet */
+ global_trace.ctrl = 0;
register_tracer(&boot_tracer);
current_trace = &boot_tracer;
current_trace->init(&global_trace);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8465ad05270..9911277b268 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -49,6 +49,7 @@ struct ftrace_entry {
unsigned long parent_ip;
};
extern struct tracer boot_tracer;
+extern struct tracer sched_switch_trace; /* Used by the boot tracer */
/*
* Context switch trace entry - which task (and prio) we switched from/to:
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index d104d5b4641..6bbc8794a6d 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -27,10 +27,14 @@ void start_boot_trace(void)
void enable_boot_trace(void)
{
+ if (pre_initcalls_finished)
+ tracing_start_cmdline_record();
}
void disable_boot_trace(void)
{
+ if (pre_initcalls_finished)
+ tracing_stop_cmdline_record();
}
void reset_boot_trace(struct trace_array *tr)
@@ -45,6 +49,8 @@ static void boot_trace_init(struct trace_array *tr)
for_each_cpu_mask(cpu, cpu_possible_map)
tracing_reset(tr, cpu);
+
+ sched_switch_trace.init(tr);
}
static void boot_trace_ctrl_update(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 96620c71430..9d7bdac331d 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -127,6 +127,7 @@ static void tracing_start_sched_switch(void)
long ref;
mutex_lock(&tracepoint_mutex);
+ tracer_enabled = 1;
ref = atomic_inc_return(&sched_ref);
if (ref == 1)
tracing_sched_register();
@@ -138,6 +139,7 @@ static void tracing_stop_sched_switch(void)
long ref;
mutex_lock(&tracepoint_mutex);
+ tracer_enabled = 0;
ref = atomic_dec_and_test(&sched_ref);
if (ref)
tracing_sched_unregister();
@@ -158,12 +160,10 @@ static void start_sched_trace(struct trace_array *tr)
{
sched_switch_reset(tr);
tracing_start_cmdline_record();
- tracer_enabled = 1;
}
static void stop_sched_trace(struct trace_array *tr)
{
- tracer_enabled = 0;
tracing_stop_cmdline_record();
}
@@ -190,7 +190,7 @@ static void sched_switch_trace_ctrl_update(struct trace_array *tr)
stop_sched_trace(tr);
}
-static struct tracer sched_switch_trace __read_mostly =
+struct tracer sched_switch_trace __read_mostly =
{
.name = "sched_switch",
.init = sched_switch_trace_init,