Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Kconfig		28
-rw-r--r--	kernel/trace/ftrace.c		4
-rw-r--r--	kernel/trace/trace_clock.c	24
-rw-r--r--	kernel/trace/trace_entries.h	27
-rw-r--r--	kernel/trace/trace_events.c	14
-rw-r--r--	kernel/trace/trace_export.c	71
6 files changed, 104 insertions, 64 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1ea0d1234f4..e7163460440 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -11,12 +11,18 @@ config NOP_TRACER
config HAVE_FTRACE_NMI_ENTER
bool
+ help
+ See Documentation/trace/ftrace-implementation.txt
config HAVE_FUNCTION_TRACER
bool
+ help
+ See Documentation/trace/ftrace-implementation.txt
config HAVE_FUNCTION_GRAPH_TRACER
bool
+ help
+ See Documentation/trace/ftrace-implementation.txt
config HAVE_FUNCTION_GRAPH_FP_TEST
bool
@@ -28,21 +34,25 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool
help
- This gets selected when the arch tests the function_trace_stop
- variable at the mcount call site. Otherwise, this variable
- is tested by the called function.
+ See Documentation/trace/ftrace-implementation.txt
config HAVE_DYNAMIC_FTRACE
bool
+ help
+ See Documentation/trace/ftrace-implementation.txt
config HAVE_FTRACE_MCOUNT_RECORD
bool
+ help
+ See Documentation/trace/ftrace-implementation.txt
config HAVE_HW_BRANCH_TRACER
bool
config HAVE_SYSCALL_TRACEPOINTS
bool
+ help
+ See Documentation/trace/ftrace-implementation.txt
config TRACER_MAX_TRACE
bool
@@ -469,6 +479,18 @@ config FTRACE_STARTUP_TEST
functioning properly. It will do tests on all the configured
tracers of ftrace.
+config EVENT_TRACE_TEST_SYSCALLS
+ bool "Run selftest on syscall events"
+ depends on FTRACE_STARTUP_TEST
+ help
+ This option will also enable testing every syscall event.
+ It only enables the event and disables it and runs various loads
+ with the event enabled. This adds a bit more time for kernel boot
+ up since it runs this on every system call defined.
+
+ TBD - enable a way to actually call the syscalls as we test their
+ events
+
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
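
The new EVENT_TRACE_TEST_SYSCALLS self-test only toggles each syscall event on and off; as the TBD above (and the comment added to trace_events.c further down) notes, nothing actually performs the syscalls while their events are enabled. A minimal user-space sketch of the kind of load that could exercise those events from outside the kernel (the paths and the program itself are assumptions for illustration, not part of this patch):

/*
 * Illustrative only, not part of this patch: enable the syscall events
 * through the tracing debugfs interface, generate a small syscall load,
 * then disable them again.  Assumes debugfs is mounted at
 * /sys/kernel/debug and that CONFIG_FTRACE_SYSCALLS is set so the
 * "syscalls" event directory exists.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void write_flag(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/syscalls/enable";
	int i;

	write_flag(enable, "1");

	/* trivial load: each pass issues a few traced system calls */
	for (i = 0; i < 1000; i++) {
		getpid();
		close(open("/dev/null", O_RDONLY));
	}

	write_flag(enable, "0");
	return 0;
}
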
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8b23d567008..f7ab7fc162c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2062,9 +2062,9 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
int i, len = 0;
char *search;
- if (glob && (strcmp(glob, "*") || !strlen(glob)))
+ if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
glob = NULL;
- else {
+ else if (glob) {
int not;
type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd81f7f..20c5f92e28a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -66,10 +66,14 @@ u64 notrace trace_clock(void)
* Used by plugins that need globally coherent timestamps.
*/
-static u64 prev_trace_clock_time;
-
-static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+/* keep prev_time and lock in the same cacheline. */
+static struct {
+ u64 prev_time;
+ raw_spinlock_t lock;
+} trace_clock_struct ____cacheline_aligned_in_smp =
+ {
+ .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+ };
u64 notrace trace_clock_global(void)
{
@@ -88,19 +92,19 @@ u64 notrace trace_clock_global(void)
if (unlikely(in_nmi()))
goto out;
- __raw_spin_lock(&trace_clock_lock);
+ __raw_spin_lock(&trace_clock_struct.lock);
/*
* TODO: if this happens often then maybe we should reset
- * my_scd->clock to prev_trace_clock_time+1, to make sure
+ * my_scd->clock to prev_time+1, to make sure
* we start ticking with the local clock from now on?
*/
- if ((s64)(now - prev_trace_clock_time) < 0)
- now = prev_trace_clock_time + 1;
+ if ((s64)(now - trace_clock_struct.prev_time) < 0)
+ now = trace_clock_struct.prev_time + 1;
- prev_trace_clock_time = now;
+ trace_clock_struct.prev_time = now;
- __raw_spin_unlock(&trace_clock_lock);
+ __raw_spin_unlock(&trace_clock_struct.lock);
out:
raw_local_irq_restore(flags);
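
The change above packs prev_time and the lock that protects it into a single struct so both sit in the same cacheline, and the (s64) comparison keeps the global clock from ever stepping backwards when one CPU's local clock lags another's. A rough user-space analogue of that logic, with a pthread mutex standing in for the raw spinlock (names and the clock source are assumptions for the sketch):

/*
 * User-space sketch of the trace_clock_global() idea, not kernel code:
 * the last timestamp and its lock live in one struct so they share a
 * cacheline, and a timestamp is never allowed to go backwards relative
 * to the previous one handed out.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static struct {
	uint64_t	prev_time;	/* last timestamp returned */
	pthread_mutex_t	lock;		/* protects prev_time */
} clock_state = { .lock = PTHREAD_MUTEX_INITIALIZER };

static uint64_t local_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t clock_global(void)
{
	uint64_t now = local_clock_ns();

	pthread_mutex_lock(&clock_state.lock);
	/* if the local clock is behind the last global stamp, nudge forward */
	if ((int64_t)(now - clock_state.prev_time) < 0)
		now = clock_state.prev_time + 1;
	clock_state.prev_time = now;
	pthread_mutex_unlock(&clock_state.lock);

	return now;
}

int main(void)
{
	printf("t1=%llu t2=%llu\n",
	       (unsigned long long)clock_global(),
	       (unsigned long long)clock_global());
	return 0;
}
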
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index c866d34e014..a431748ddd6 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -78,7 +78,7 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
__field_desc( int, graph_ent, depth )
),
- F_printk("--> %lx (%d)", __entry->graph_ent.func, __entry->depth)
+ F_printk("--> %lx (%d)", __entry->func, __entry->depth)
);
/* Function return entry */
@@ -97,8 +97,8 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
F_printk("<-- %lx (%d) (start: %llx end: %llx) over: %d",
__entry->func, __entry->depth,
- __entry->calltime, __entry->rettim,
- __entrty->depth)
+ __entry->calltime, __entry->rettime,
+ __entry->depth)
);
/*
@@ -116,15 +116,6 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
__field( unsigned char, next_state ) \
__field( unsigned int, next_cpu )
-#if 0
-FTRACE_ENTRY_STRUCT_ONLY(ctx_switch_entry,
-
- F_STRUCT(
- FTRACE_CTX_FIELDS
- )
-);
-#endif
-
FTRACE_ENTRY(context_switch, ctx_switch_entry,
TRACE_CTX,
@@ -133,7 +124,7 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
FTRACE_CTX_FIELDS
),
- F_printk(b"%u:%u:%u ==> %u:%u:%u [%03u]",
+ F_printk("%u:%u:%u ==> %u:%u:%u [%03u]",
__entry->prev_pid, __entry->prev_prio, __entry->prev_state,
__entry->next_pid, __entry->next_prio, __entry->next_state,
__entry->next_cpu
@@ -257,8 +248,8 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
__field_desc( unsigned char, rw, width )
),
- F_printk("%lx %lx %lx %d %lx %lx",
- __entry->phs, __entry->value, __entry->pc,
+ F_printk("%lx %lx %lx %d %x %x",
+ (unsigned long)__entry->phys, __entry->value, __entry->pc,
__entry->map_id, __entry->opcode, __entry->width)
);
@@ -275,8 +266,8 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
__field_desc( unsigned char, map, opcode )
),
- F_printk("%lx %lx %lx %d %lx",
- __entry->phs, __entry->virt, __entry->len,
+ F_printk("%lx %lx %lx %d %x",
+ (unsigned long)__entry->phys, __entry->virt, __entry->len,
__entry->map_id, __entry->opcode)
);
@@ -370,7 +361,7 @@ FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
__field( int, node )
),
- F_printk("type:%u call_site:%lx ptr:%p req:%lu alloc:%lu"
+ F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi"
" flags:%x node:%d",
__entry->type_id, __entry->call_site, __entry->ptr,
__entry->bytes_req, __entry->bytes_alloc,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index adbed124c3e..787f0fb0994 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1154,7 +1154,7 @@ static int trace_module_notify(struct notifier_block *self,
}
#endif /* CONFIG_MODULES */
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
.notifier_call = trace_module_notify,
.priority = 0,
};
@@ -1326,6 +1326,18 @@ static __init void event_trace_self_tests(void)
if (!call->regfunc)
continue;
+/*
+ * Testing syscall events here is pretty useless, but
+ * we still do it if configured. But this is time consuming.
+ * What we really need is a user thread to perform the
+ * syscalls as we test.
+ */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+ if (call->system &&
+ strcmp(call->system, "syscalls") == 0)
+ continue;
+#endif
+
pr_info("Testing event %s: ", call->name);
/*
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4cb29d84d73..9753fcc61bc 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -23,6 +23,47 @@
#define __field_struct(type, item)
#undef __field
+#define __field(type, item) type item;
+
+#undef __field_desc
+#define __field_desc(type, container, item) type item;
+
+#undef __array
+#define __array(type, item, size) type item[size];
+
+#undef __array_desc
+#define __array_desc(type, container, item, size) type item[size];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item) type item[];
+
+#undef F_STRUCT
+#define F_STRUCT(args...) args
+
+#undef F_printk
+#define F_printk(fmt, args...) fmt, args
+
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \
+struct ____ftrace_##name { \
+ tstruct \
+}; \
+static void __used ____ftrace_check_##name(void) \
+{ \
+ struct ____ftrace_##name *__entry = NULL; \
+ \
+ /* force compile-time check on F_printk() */ \
+ printk(print); \
+}
+
+#undef FTRACE_ENTRY_DUP
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \
+ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
+
+#include "trace_entries.h"
+
+
+#undef __field
#define __field(type, item) \
ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
"offset:%zu;\tsize:%zu;\n", \
@@ -88,10 +129,6 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
return ret; \
}
-#undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \
- FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
-
#include "trace_entries.h"
@@ -172,32 +209,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
#undef __dynamic_array
#define __dynamic_array(type, item)
-
-#undef TRACE_ZERO_CHAR
-#define TRACE_ZERO_CHAR(arg)
-
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)\
- entry->item = assign;
-
-#undef TRACE_FIELD
-#define TRACE_FIELD(type, item, assign)\
- entry->item = assign;
-
-#undef TRACE_FIELD_SIGN
-#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \
- TRACE_FIELD(type, item, assign)
-
-#undef TP_CMD
-#define TP_CMD(cmd...) cmd
-
-#undef TRACE_ENTRY
-#define TRACE_ENTRY entry
-
-#undef TRACE_FIELD_SPECIAL
-#define TRACE_FIELD_SPECIAL(type_item, item, len, cmd) \
- cmd;
-
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
static int ftrace_raw_init_event_##call(void); \