-rw-r--r--  kernel/trace/trace.c                   110
-rw-r--r--  kernel/trace/trace.h                     1
-rw-r--r--  kernel/trace/trace_functions_graph.c   122
-rw-r--r--  kernel/trace/trace_selftest.c            1
4 files changed, 121 insertions(+), 113 deletions(-)
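In short: this patch moves the function-graph ring-buffer helpers (__trace_graph_entry, __trace_graph_return) and their entry points (trace_graph_entry, trace_graph_return) out of kernel/trace/trace.c and into kernel/trace/trace_functions_graph.c, where they belong with the rest of the graph tracer. Along the way, the hardcoded references to &global_trace become a file-local graph_array pointer, trace_graph_entry() gains a NULL check on that pointer, and a new set_graph_array() setter lets code outside the file (such as the startup selftest) choose which trace_array the graph events land in before registering the callbacks.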
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1b73acb40e5..0cfd1a62def 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -942,54 +942,6 @@ trace_function(struct trace_array *tr,
ring_buffer_unlock_commit(tr->buffer, event);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int __trace_graph_entry(struct trace_array *tr,
- struct ftrace_graph_ent *trace,
- unsigned long flags,
- int pc)
-{
- struct ftrace_event_call *call = &event_funcgraph_entry;
- struct ring_buffer_event *event;
- struct ftrace_graph_ent_entry *entry;
-
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- return 0;
-
- event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
- sizeof(*entry), flags, pc);
- if (!event)
- return 0;
- entry = ring_buffer_event_data(event);
- entry->graph_ent = *trace;
- if (!filter_current_check_discard(call, entry, event))
- ring_buffer_unlock_commit(global_trace.buffer, event);
-
- return 1;
-}
-
-static void __trace_graph_return(struct trace_array *tr,
- struct ftrace_graph_ret *trace,
- unsigned long flags,
- int pc)
-{
- struct ftrace_event_call *call = &event_funcgraph_exit;
- struct ring_buffer_event *event;
- struct ftrace_graph_ret_entry *entry;
-
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
- return;
-
- event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
- sizeof(*entry), flags, pc);
- if (!event)
- return;
- entry = ring_buffer_event_data(event);
- entry->ret = *trace;
- if (!filter_current_check_discard(call, entry, event))
- ring_buffer_unlock_commit(global_trace.buffer, event);
-}
-#endif
-
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1129,68 +1081,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
local_irq_restore(flags);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-int trace_graph_entry(struct ftrace_graph_ent *trace)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int ret;
- int cpu;
- int pc;
-
- if (!ftrace_trace_task(current))
- return 0;
-
- if (!ftrace_graph_addr(trace->func))
- return 0;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
- } else {
- ret = 0;
- }
- /* Only do the atomic if it is not already set */
- if (!test_tsk_trace_graph(current))
- set_tsk_trace_graph(current);
-
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-
- return ret;
-}
-
-void trace_graph_return(struct ftrace_graph_ret *trace)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int cpu;
- int pc;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
- }
- if (!trace->depth)
- clear_tsk_trace_graph(current);
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-
/**
* trace_vbprintk - write binary msg to tracing buffer
*
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 116524d6236..9301f1263c5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -471,6 +471,7 @@ void trace_function(struct trace_array *tr,
void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
+void set_graph_array(struct trace_array *tr);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
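The one-line declaration added above is the entire public surface of the change. Below is a minimal userspace sketch of the pattern it exposes, a file-local back-end pointer plus a setter, so that callbacks registered from elsewhere know which trace_array to write into. This is illustration only, not kernel code: the struct layout, the printf output, and the trace_graph_entry_sketch() name are invented here.

/*
 * Userspace sketch (illustration only) of the graph_array +
 * set_graph_array() pattern introduced by this patch.
 */
#include <stdio.h>

struct trace_array { const char *name; };   /* invented stand-in */

/* mirrors the new file-local pointer in trace_functions_graph.c */
static struct trace_array *graph_array;

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;
}

/* stands in for trace_graph_entry(); note the same NULL guard */
int trace_graph_entry_sketch(unsigned long func_addr)
{
	struct trace_array *tr = graph_array;

	if (!tr)	/* mirrors the patch's 'if (unlikely(!tr)) return 0;' */
		return 0;
	printf("enter %#lx -> %s\n", func_addr, tr->name);
	return 1;
}

int main(void)
{
	struct trace_array global = { .name = "global_trace" };

	trace_graph_entry_sketch(0x1000);  /* dropped: no array set yet */
	set_graph_array(&global);          /* as graph_trace_init() and the selftest now do */
	trace_graph_entry_sketch(0x1000);  /* recorded */
	return 0;
}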
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e30472da15d..f97244a41a4 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
.opts = trace_opts
};
-/* pid on the last trace processed */
+static struct trace_array *graph_array;
/* Add a function return address to the trace stack on thread info.*/
@@ -166,10 +166,121 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
+static int __trace_graph_entry(struct trace_array *tr,
+ struct ftrace_graph_ent *trace,
+ unsigned long flags,
+ int pc)
+{
+ struct ftrace_event_call *call = &event_funcgraph_entry;
+ struct ring_buffer_event *event;
+ struct ftrace_graph_ent_entry *entry;
+
+ if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ return 0;
+
+ event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_ENT,
+ sizeof(*entry), flags, pc);
+ if (!event)
+ return 0;
+ entry = ring_buffer_event_data(event);
+ entry->graph_ent = *trace;
+ if (!filter_current_check_discard(call, entry, event))
+ ring_buffer_unlock_commit(tr->buffer, event);
+
+ return 1;
+}
+
+int trace_graph_entry(struct ftrace_graph_ent *trace)
+{
+ struct trace_array *tr = graph_array;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int ret;
+ int cpu;
+ int pc;
+
+ if (unlikely(!tr))
+ return 0;
+
+ if (!ftrace_trace_task(current))
+ return 0;
+
+ if (!ftrace_graph_addr(trace->func))
+ return 0;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ ret = __trace_graph_entry(tr, trace, flags, pc);
+ } else {
+ ret = 0;
+ }
+ /* Only do the atomic if it is not already set */
+ if (!test_tsk_trace_graph(current))
+ set_tsk_trace_graph(current);
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static void __trace_graph_return(struct trace_array *tr,
+ struct ftrace_graph_ret *trace,
+ unsigned long flags,
+ int pc)
+{
+ struct ftrace_event_call *call = &event_funcgraph_exit;
+ struct ring_buffer_event *event;
+ struct ftrace_graph_ret_entry *entry;
+
+ if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ return;
+
+ event = trace_buffer_lock_reserve(tr, TRACE_GRAPH_RET,
+ sizeof(*entry), flags, pc);
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->ret = *trace;
+ if (!filter_current_check_discard(call, entry, event))
+ ring_buffer_unlock_commit(tr->buffer, event);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+ struct trace_array *tr = graph_array;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+ int pc;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+ if (likely(disabled == 1)) {
+ pc = preempt_count();
+ __trace_graph_return(tr, trace, flags, pc);
+ }
+ if (!trace->depth)
+ clear_tsk_trace_graph(current);
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
static int graph_trace_init(struct trace_array *tr)
{
- int ret = register_ftrace_graph(&trace_graph_return,
- &trace_graph_entry);
+ int ret;
+
+ graph_array = tr;
+ ret = register_ftrace_graph(&trace_graph_return,
+ &trace_graph_entry);
if (ret)
return ret;
tracing_start_cmdline_record();
@@ -177,6 +288,11 @@ static int graph_trace_init(struct trace_array *tr)
return 0;
}
+void set_graph_array(struct trace_array *tr)
+{
+ graph_array = tr;
+}
+
static void graph_trace_reset(struct trace_array *tr)
{
tracing_stop_cmdline_record();
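One asymmetry worth noting in the moved code above: trace_graph_entry() bails out while graph_array is still NULL, but trace_graph_return() dereferences it unconditionally (tr->data[cpu]). Any path that registers these callbacks without going through graph_trace_init() therefore has to call set_graph_array() first, which is exactly what the selftest hunk below adds ahead of its register_ftrace_graph() call.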
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 00dd6485bdd..d2cdbabb4ea 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -288,6 +288,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
* to detect and recover from possible hangs
*/
tracing_reset_online_cpus(tr);
+ set_graph_array(tr);
ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry_watchdog);
if (ret) {