Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Makefile                |   1
-rw-r--r--  kernel/trace/events.c                |   1
-rw-r--r--  kernel/trace/ring_buffer.c           |   8
-rw-r--r--  kernel/trace/trace.c                 | 111
-rw-r--r--  kernel/trace/trace.h                 |  17
-rw-r--r--  kernel/trace/trace_event_profile.c   |  31
-rw-r--r--  kernel/trace/trace_event_types.h     |   2
-rw-r--r--  kernel/trace/trace_events.c          |  45
-rw-r--r--  kernel/trace/trace_events_stage_3.h  |  59
-rw-r--r--  kernel/trace/trace_functions_graph.c | 219
-rw-r--r--  kernel/trace/trace_mmiotrace.c       |   2
-rw-r--r--  kernel/trace/trace_output.c          |  37
-rw-r--r--  kernel/trace/trace_output.h          |   5
-rw-r--r--  kernel/trace/trace_printk.c          |   8
-rw-r--r--  kernel/trace/trace_selftest.c        |  38
15 files changed, 411 insertions, 173 deletions
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index c3feea01c3e..0e45c206c2f 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -44,5 +44,6 @@ obj-$(CONFIG_EVENT_TRACER) += trace_events.o
 obj-$(CONFIG_EVENT_TRACER) += events.o
 obj-$(CONFIG_EVENT_TRACER) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
+obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/events.c b/kernel/trace/events.c
index 9fc918da404..246f2aa6dc4 100644
--- a/kernel/trace/events.c
+++ b/kernel/trace/events.c
@@ -12,4 +12,3 @@
 #include "trace_events_stage_2.h"
 #include "trace_events_stage_3.h"
 
-#include <trace/trace_event_types.h>
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 384ca5d9d72..808b14bbf07 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -535,8 +535,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 extern int ring_buffer_page_too_big(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int __cpuinit rb_cpu_notify(struct notifier_block *self,
-                                   unsigned long action, void *hcpu);
+static int rb_cpu_notify(struct notifier_block *self,
+                         unsigned long action, void *hcpu);
 #endif
 
 /**
@@ -2784,8 +2784,8 @@ static __init int rb_init_debugfs(void)
 fs_initcall(rb_init_debugfs);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int __cpuinit rb_cpu_notify(struct notifier_block *self,
-                                   unsigned long action, void *hcpu)
+static int rb_cpu_notify(struct notifier_block *self,
+                         unsigned long action, void *hcpu)
 {
         struct ring_buffer *buffer =
                 container_of(self, struct ring_buffer, cpu_notify);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c95b7292be7..e6fac0ffe6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1201,7 +1201,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
-int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
         static raw_spinlock_t trace_buf_lock =
                 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -1243,7 +1243,6 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
                 goto out_unlock;
         entry = ring_buffer_event_data(event);
         entry->ip = ip;
-        entry->depth = depth;
         entry->fmt = fmt;
 
         memcpy(entry->buf, trace_buf, sizeof(u32) * len);
@@ -1261,7 +1260,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
         static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
         static char trace_buf[TRACE_BUF_SIZE];
@@ -1298,7 +1297,6 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
                 goto out_unlock;
         entry = ring_buffer_event_data(event);
         entry->ip = ip;
-        entry->depth = depth;
 
         memcpy(&entry->buf, trace_buf, len);
         entry->buf[len] = 0;
@@ -1701,38 +1699,6 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
         return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
-{
-        struct trace_seq *s = &iter->seq;
-        struct trace_entry *entry = iter->ent;
-        struct bprint_entry *field;
-        int ret;
-
-        trace_assign_type(field, entry);
-
-        ret = trace_seq_bprintf(s, field->fmt, field->buf);
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
-{
-        struct trace_seq *s = &iter->seq;
-        struct trace_entry *entry = iter->ent;
-        struct print_entry *field;
-        int ret;
-
-        trace_assign_type(field, entry);
-
-        ret = trace_seq_printf(s, "%s", field->buf);
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        return TRACE_TYPE_HANDLED;
-}
-
 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 {
         struct trace_seq *s = &iter->seq;
@@ -1794,12 +1760,12 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
         if (iter->ent->type == TRACE_BPRINT &&
                         trace_flags & TRACE_ITER_PRINTK &&
                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
-                return print_bprintk_msg_only(iter);
+                return trace_print_bprintk_msg_only(iter);
 
         if (iter->ent->type == TRACE_PRINT &&
                         trace_flags & TRACE_ITER_PRINTK &&
                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
-                return print_printk_msg_only(iter);
+                return trace_print_printk_msg_only(iter);
 
         if (trace_flags & TRACE_ITER_BIN)
                 return print_bin_fmt(iter);
@@ -1948,9 +1914,14 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 static int tracing_release(struct inode *inode, struct file *file)
 {
         struct seq_file *m = (struct seq_file *)file->private_data;
-        struct trace_iterator *iter = m->private;
+        struct trace_iterator *iter;
         int cpu;
 
+        if (!(file->f_mode & FMODE_READ))
+                return 0;
+
+        iter = m->private;
+
         mutex_lock(&trace_types_lock);
         for_each_tracing_cpu(cpu) {
                 if (iter->buffer_iter[cpu])
@@ -1976,12 +1947,24 @@ static int tracing_open(struct inode *inode, struct file *file)
         struct trace_iterator *iter;
         int ret = 0;
 
-        iter = __tracing_open(inode, file);
-        if (IS_ERR(iter))
-                ret = PTR_ERR(iter);
-        else if (trace_flags & TRACE_ITER_LATENCY_FMT)
-                iter->iter_flags |= TRACE_FILE_LAT_FMT;
+        /* If this file was open for write, then erase contents */
+        if ((file->f_mode & FMODE_WRITE) &&
+            !(file->f_flags & O_APPEND)) {
+                long cpu = (long) inode->i_private;
 
+                if (cpu == TRACE_PIPE_ALL_CPU)
+                        tracing_reset_online_cpus(&global_trace);
+                else
+                        tracing_reset(&global_trace, cpu);
+        }
+
+        if (file->f_mode & FMODE_READ) {
+                iter = __tracing_open(inode, file);
+                if (IS_ERR(iter))
+                        ret = PTR_ERR(iter);
+                else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+                        iter->iter_flags |= TRACE_FILE_LAT_FMT;
+        }
         return ret;
 }
 
@@ -2056,9 +2039,17 @@ static int show_traces_open(struct inode *inode, struct file *file)
         return ret;
 }
 
+static ssize_t
+tracing_write_stub(struct file *filp, const char __user *ubuf,
+                   size_t count, loff_t *ppos)
+{
+        return count;
+}
+
 static const struct file_operations tracing_fops = {
         .open           = tracing_open,
         .read           = seq_read,
+        .write          = tracing_write_stub,
         .llseek         = seq_lseek,
         .release        = tracing_release,
 };
@@ -3154,7 +3145,7 @@ static int mark_printk(const char *fmt, ...)
         int ret;
         va_list args;
         va_start(args, fmt);
-        ret = trace_vprintk(0, -1, fmt, args);
+        ret = trace_vprintk(0, fmt, args);
         va_end(args);
         return ret;
 }
@@ -3583,7 +3574,7 @@ static void tracing_init_debugfs_percpu(long cpu)
                 pr_warning("Could not create debugfs 'trace_pipe' entry\n");
 
         /* per cpu trace */
-        entry = debugfs_create_file("trace", 0444, d_cpu,
+        entry = debugfs_create_file("trace", 0644, d_cpu,
                                 (void *) cpu, &tracing_fops);
         if (!entry)
                 pr_warning("Could not create debugfs 'trace' entry\n");
@@ -3897,7 +3888,7 @@ static __init int tracer_init_debugfs(void)
         if (!entry)
                 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
 
-        entry = debugfs_create_file("trace", 0444, d_tracer,
+        entry = debugfs_create_file("trace", 0644, d_tracer,
                                  (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
         if (!entry)
                 pr_warning("Could not create debugfs 'trace' entry\n");
@@ -4027,11 +4018,12 @@ trace_printk_seq(struct trace_seq *s)
         trace_seq_init(s);
 }
 
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
 {
         static DEFINE_SPINLOCK(ftrace_dump_lock);
         /* use static because iter can be a bit big for the stack */
         static struct trace_iterator iter;
+        unsigned int old_userobj;
         static int dump_ran;
         unsigned long flags;
         int cnt = 0, cpu;
@@ -4043,14 +4035,17 @@ void ftrace_dump(void)
 
         dump_ran = 1;
 
-        /* No turning back! */
         tracing_off();
-        ftrace_kill();
+
+        if (disable_tracing)
+                ftrace_kill();
 
         for_each_tracing_cpu(cpu) {
                 atomic_inc(&global_trace.data[cpu]->disabled);
         }
 
+        old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
         /* don't look at user memory in panic mode */
         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
@@ -4095,10 +4090,26 @@ void ftrace_dump(void)
         else
                 printk(KERN_TRACE "---------------------------------\n");
 
+        /* Re-enable tracing if requested */
+        if (!disable_tracing) {
+                trace_flags |= old_userobj;
+
+                for_each_tracing_cpu(cpu) {
+                        atomic_dec(&global_trace.data[cpu]->disabled);
+                }
+                tracing_on();
+        }
+
  out:
         spin_unlock_irqrestore(&ftrace_dump_lock, flags);
 }
 
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+        __ftrace_dump(true);
+}
+
 __init static int tracer_alloc_buffers(void)
 {
         struct trace_array_cpu *data;
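Note on the tracing_open()/tracing_release() hunks above: they change the semantics of the debugfs "trace" file (hence the 0444 to 0644 mode change). Opening it for writing without O_APPEND now resets the ring buffer (one CPU's buffer or all of them, depending on which "trace" file is opened), and any bytes actually written are accepted and discarded by tracing_write_stub(). A minimal userspace sketch of the new "clear the buffer" idiom; this is not part of the commit, and it assumes debugfs is mounted at /sys/kernel/debug:

    #include <fcntl.h>
    #include <unistd.h>

    /* Equivalent to the shell idiom: echo > /sys/kernel/debug/tracing/trace */
    int clear_trace_buffer(void)
    {
            /* Any write-open without O_APPEND triggers tracing_reset*() */
            int fd = open("/sys/kernel/debug/tracing/trace",
                          O_WRONLY | O_TRUNC);

            if (fd < 0)
                    return -1;
            close(fd);
            return 0;
    }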
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 38276d1638e..7cfb741be20 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -123,7 +123,6 @@ struct userstack_entry {
 struct bprint_entry {
         struct trace_entry ent;
         unsigned long ip;
-        int depth;
         const char *fmt;
         u32 buf[];
 };
@@ -131,7 +130,6 @@ struct bprint_entry {
 struct print_entry {
         struct trace_entry ent;
         unsigned long ip;
-        int depth;
         char buf[];
 };
 
@@ -598,9 +596,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 extern void *head_page(struct trace_array_cpu *data);
 extern long ns2usecs(cycle_t nsec);
 extern int
-trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
-trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
 
@@ -787,12 +785,23 @@ struct ftrace_event_call {
         int id;
         int (*raw_init)(void);
         int (*show_format)(struct trace_seq *s);
+
+#ifdef CONFIG_EVENT_PROFILE
+        atomic_t profile_count;
+        int (*profile_enable)(struct ftrace_event_call *);
+        void (*profile_disable)(struct ftrace_event_call *);
+#endif
 };
 
 void event_trace_printk(unsigned long ip, const char *fmt, ...);
 extern struct ftrace_event_call __start_ftrace_events[];
 extern struct ftrace_event_call __stop_ftrace_events[];
 
+#define for_each_event(event) \
+        for (event = __start_ftrace_events; \
+             (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+             event++)
+
 extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
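The for_each_event() macro promoted to trace.h above walks the ftrace_event_call objects that the TRACE_FORMAT/TRACE_EVENT macros place into the "_ftrace_events" linker section; __start_ftrace_events/__stop_ftrace_events are the section bounds the linker generates. The same technique can be demonstrated in plain userspace C with GCC or Clang; every name in this sketch is invented for the demo and is not part of the commit:

    #include <stdio.h>

    struct demo_event {
            const char *name;
    };

    /* GNU ld auto-defines __start_/__stop_ symbols for any section whose
     * name is a valid C identifier. */
    extern struct demo_event __start_demo_events[];
    extern struct demo_event __stop_demo_events[];

    #define DEMO_EVENT(n) \
            static struct demo_event event_##n \
            __attribute__((used, section("demo_events"))) = { .name = #n }

    DEMO_EVENT(alpha);
    DEMO_EVENT(beta);

    int main(void)
    {
            struct demo_event *ev;

            /* Same shape as for_each_event() in the hunk above */
            for (ev = __start_demo_events; ev < __stop_demo_events; ev++)
                    printf("event: %s\n", ev->name);
            return 0;
    }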
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
new file mode 100644
index 00000000000..22cba997077
--- /dev/null
+++ b/kernel/trace/trace_event_profile.c
@@ -0,0 +1,31 @@
+/*
+ * trace event based perf counter profiling
+ *
+ * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ */
+
+#include "trace.h"
+
+int ftrace_profile_enable(int event_id)
+{
+        struct ftrace_event_call *event;
+
+        for_each_event(event) {
+                if (event->id == event_id)
+                        return event->profile_enable(event);
+        }
+
+        return -EINVAL;
+}
+
+void ftrace_profile_disable(int event_id)
+{
+        struct ftrace_event_call *event;
+
+        for_each_event(event) {
+                if (event->id == event_id)
+                        return event->profile_disable(event);
+        }
+}
+
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index 019915063fe..fd78bee71dd 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -105,7 +105,6 @@ TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
 TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
         TRACE_STRUCT(
                 TRACE_FIELD(unsigned long, ip, ip)
-                TRACE_FIELD(unsigned int, depth, depth)
                 TRACE_FIELD(char *, fmt, fmt)
                 TRACE_FIELD_ZERO_CHAR(buf)
         ),
@@ -115,7 +114,6 @@ TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
 TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
         TRACE_STRUCT(
                 TRACE_FIELD(unsigned long, ip, ip)
-                TRACE_FIELD(unsigned int, depth, depth)
                 TRACE_FIELD_ZERO_CHAR(buf)
         ),
         TP_RAW_FMT("%08lx (%d) fmt:%p %s")
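ftrace_profile_enable()/ftrace_profile_disable() above are the dispatch layer: they resolve a numeric event id to its ftrace_event_call and invoke the per-event hooks generated later in trace_events_stage_3.h. A hypothetical in-kernel caller (illustration only, not taken from this commit) would bracket a measurement window like this:

    /* Hypothetical profiling backend, kernel context; names invented. */
    int start_counting(int event_id)
    {
            int err = ftrace_profile_enable(event_id);

            if (err)
                    return err;     /* -EINVAL: no event with this id */
            /* counting now happens in the registered tracepoint probe */
            return 0;
    }

    void stop_counting(int event_id)
    {
            ftrace_profile_disable(event_id);
    }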
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c88227b3b9d..3047b56f663 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -19,11 +19,6 @@
 
 static DEFINE_MUTEX(event_mutex);
 
-#define events_for_each(event) \
-        for (event = __start_ftrace_events; \
-             (unsigned long)event < (unsigned long)__stop_ftrace_events; \
-             event++)
-
 static void ftrace_clear_events(void)
 {
         struct ftrace_event_call *call = (void *)__start_ftrace_events;
@@ -90,7 +85,7 @@ static int ftrace_set_clr_event(char *buf, int set)
         }
 
         mutex_lock(&event_mutex);
-        events_for_each(call) {
+        for_each_event(call) {
 
                 if (!call->name || !call->regfunc)
                         continue;
@@ -412,6 +407,29 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
         return r;
 }
 
+static ssize_t
+event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+        struct ftrace_event_call *call = filp->private_data;
+        struct trace_seq *s;
+        int r;
+
+        if (*ppos)
+                return 0;
+
+        s = kmalloc(sizeof(*s), GFP_KERNEL);
+        if (!s)
+                return -ENOMEM;
+
+        trace_seq_init(s);
+        trace_seq_printf(s, "%d\n", call->id);
+
+        r = simple_read_from_buffer(ubuf, cnt, ppos,
+                                    s->buffer, s->len);
+        kfree(s);
+        return r;
+}
+
 static const struct seq_operations show_event_seq_ops = {
         .start = t_start,
         .next = t_next,
@@ -452,6 +470,11 @@ static const struct file_operations ftrace_event_format_fops = {
         .read = event_format_read,
 };
 
+static const struct file_operations ftrace_event_id_fops = {
+        .open = tracing_open_generic,
+        .read = event_id_read,
+};
+
 static struct dentry *event_trace_events_dir(void)
 {
         static struct dentry *d_tracer;
@@ -550,6 +573,14 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
                            "'%s/enable' entry\n", call->name);
         }
 
+        if (call->id) {
+                entry = debugfs_create_file("id", 0444, call->dir, call,
+                                            &ftrace_event_id_fops);
+                if (!entry)
+                        pr_warning("Could not create debugfs '%s/id' entry\n",
+                                   call->name);
+        }
+
         /* A trace may not want to export its format */
         if (!call->show_format)
                 return 0;
@@ -592,7 +623,7 @@ static __init int event_trace_init(void)
         if (!d_events)
                 return 0;
 
-        events_for_each(call) {
+        for_each_event(call) {
                 /* The linker may leave blanks */
                 if (!call->name)
                         continue;
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h
index ae2e323df0c..6b3261ca988 100644
--- a/kernel/trace/trace_events_stage_3.h
+++ b/kernel/trace/trace_events_stage_3.h
@@ -109,6 +109,40 @@
 #undef TP_FMT
 #define TP_FMT(fmt, args...) fmt "\n", ##args
 
+#ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args) \
+static void ftrace_profile_##call(proto) \
+{ \
+        extern void perf_tpcounter_event(int); \
+        perf_tpcounter_event(event_##call.id); \
+} \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *call) \
+{ \
+        int ret = 0; \
+ \
+        if (!atomic_inc_return(&call->profile_count)) \
+                ret = register_trace_##call(ftrace_profile_##call); \
+ \
+        return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *call) \
+{ \
+        if (atomic_add_negative(-1, &call->profile_count)) \
+                unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#define _TRACE_PROFILE_INIT(call) \
+        .profile_count = ATOMIC_INIT(-1), \
+        .profile_enable = ftrace_profile_enable_##call, \
+        .profile_disable = ftrace_profile_disable_##call,
+
+#else
+#define _TRACE_PROFILE(call, proto, args)
+#define _TRACE_PROFILE_INIT(call)
+#endif
+
 #define _TRACE_FORMAT(call, proto, args, fmt) \
 static void ftrace_event_##call(proto) \
 { \
@@ -130,18 +164,33 @@ static void ftrace_unreg_event_##call(void) \
 { \
         unregister_trace_##call(ftrace_event_##call); \
 } \
-
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static int ftrace_init_event_##call(void) \
+{ \
+        int id; \
+ \
+        id = register_ftrace_event(NULL); \
+        if (!id) \
+                return -ENODEV; \
+        event_##call.id = id; \
+        return 0; \
+}
 
 #undef TRACE_FORMAT
 #define TRACE_FORMAT(call, proto, args, fmt) \
 _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
 static struct ftrace_event_call __used \
 __attribute__((__aligned__(4))) \
 __attribute__((section("_ftrace_events"))) event_##call = { \
         .name = #call, \
         .system = __stringify(TRACE_SYSTEM), \
+        .raw_init = ftrace_init_event_##call, \
         .regfunc = ftrace_reg_event_##call, \
         .unregfunc = ftrace_unreg_event_##call, \
+        _TRACE_PROFILE_INIT(call) \
 }
 
 #undef __entry
@@ -149,6 +198,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
 \
 static struct ftrace_event_call event_##call; \
 \
@@ -214,4 +264,11 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
         .regfunc = ftrace_raw_reg_event_##call, \
         .unregfunc = ftrace_raw_unreg_event_##call, \
         .show_format = ftrace_format_##call, \
+        _TRACE_PROFILE_INIT(call) \
 }
+
+#include <trace/trace_event_types.h>
+
+#undef _TRACE_PROFILE
+#undef _TRACE_PROFILE_INIT
+
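Two details above are worth calling out. First, userspace discovers an event's numeric id by reading the new per-event debugfs "id" file (events/<system>/<event>/id). Second, profile_count starts at ATOMIC_INIT(-1), so the first enable (increment to 0) registers the tracepoint probe and the last disable (decrement back below 0) unregisters it; intermediate enables and disables are no-ops. A compilable C11 analogue of that count-from-minus-one idiom, as a sketch rather than kernel code:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int profile_count = -1;

    static void profile_enable(void)
    {
            /* kernel: if (!atomic_inc_return(&count)) register probe */
            if (atomic_fetch_add(&profile_count, 1) + 1 == 0)
                    puts("first user: register probe");
    }

    static void profile_disable(void)
    {
            /* kernel: if (atomic_add_negative(-1, &count)) unregister */
            if (atomic_fetch_sub(&profile_count, 1) - 1 < 0)
                    puts("last user: unregister probe");
    }

    int main(void)
    {
            profile_enable();       /* registers */
            profile_enable();       /* nested user, no-op */
            profile_disable();      /* no-op */
            profile_disable();      /* unregisters */
            return 0;
    }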
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6004ccac2dd..e876816fa8e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,6 +14,11 @@
 #include "trace.h"
 #include "trace_output.h"
 
+struct fgraph_data {
+        pid_t last_pid;
+        int depth;
+};
+
 #define TRACE_GRAPH_INDENT 2
 
 /* Flag options */
@@ -231,16 +236,16 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 {
         pid_t prev_pid;
         pid_t *last_pid;
         int ret;
 
-        if (!last_pids_cpu)
+        if (!data)
                 return TRACE_TYPE_HANDLED;
 
-        last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+        last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
 
         if (*last_pid == pid)
                 return TRACE_TYPE_HANDLED;
@@ -471,6 +476,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
                 struct ftrace_graph_ent_entry *entry,
                 struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
+        struct fgraph_data *data = iter->private;
         struct ftrace_graph_ret *graph_ret;
         struct ftrace_graph_ent *call;
         unsigned long long duration;
@@ -481,6 +487,18 @@ print_graph_entry_leaf(struct trace_iterator *iter,
         call = &entry->graph_ent;
         duration = graph_ret->rettime - graph_ret->calltime;
 
+        if (data) {
+                int cpu = iter->cpu;
+                int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+                /*
+                 * Comments display at + 1 to depth. Since
+                 * this is a leaf function, keep the comments
+                 * equal to this depth.
+                 */
+                *depth = call->depth - 1;
+        }
+
         /* Overhead */
         ret = print_graph_overhead(duration, s);
         if (!ret)
@@ -512,12 +530,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 }
 
 static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
-                        struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+                         struct ftrace_graph_ent_entry *entry,
+                         struct trace_seq *s, int cpu)
 {
-        int i;
-        int ret;
         struct ftrace_graph_ent *call = &entry->graph_ent;
+        struct fgraph_data *data = iter->private;
+        int ret;
+        int i;
+
+        if (data) {
+                int cpu = iter->cpu;
+                int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+                *depth = call->depth;
+        }
 
         /* No overhead */
         ret = print_graph_overhead(-1, s);
@@ -554,24 +581,24 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 }
 
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-                        struct trace_iterator *iter)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+                     int type, unsigned long addr)
 {
-        int ret;
-        int cpu = iter->cpu;
-        pid_t *last_entry = iter->private;
+        struct fgraph_data *data = iter->private;
         struct trace_entry *ent = iter->ent;
-        struct ftrace_graph_ent *call = &field->graph_ent;
-        struct ftrace_graph_ret_entry *leaf_ret;
+        int cpu = iter->cpu;
+        int ret;
 
         /* Pid */
-        if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        /* Interrupt */
-        ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
-        if (ret == TRACE_TYPE_PARTIAL_LINE)
-                return TRACE_TYPE_PARTIAL_LINE;
+        if (type) {
+                /* Interrupt */
+                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+                if (ret == TRACE_TYPE_PARTIAL_LINE)
+                        return TRACE_TYPE_PARTIAL_LINE;
+        }
 
         /* Absolute time */
         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
@@ -598,11 +625,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                         return TRACE_TYPE_PARTIAL_LINE;
         }
 
+        return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+                        struct trace_iterator *iter)
+{
+        int cpu = iter->cpu;
+        struct ftrace_graph_ent *call = &field->graph_ent;
+        struct ftrace_graph_ret_entry *leaf_ret;
+
+        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+                return TRACE_TYPE_PARTIAL_LINE;
+
         leaf_ret = get_return_for_leaf(iter, field);
         if (leaf_ret)
                 return print_graph_entry_leaf(iter, field, leaf_ret, s);
         else
-                return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+                return print_graph_entry_nested(iter, field, s, cpu);
 
 }
 
@@ -610,40 +651,27 @@ static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                    struct trace_entry *ent, struct trace_iterator *iter)
 {
-        int i;
-        int ret;
-        int cpu = iter->cpu;
-        pid_t *last_pid = iter->private, pid = ent->pid;
         unsigned long long duration = trace->rettime - trace->calltime;
+        struct fgraph_data *data = iter->private;
+        pid_t pid = ent->pid;
+        int cpu = iter->cpu;
+        int ret;
+        int i;
 
-        /* Pid */
-        if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        /* Absolute time */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-                ret = print_graph_abs_time(iter->ts, s);
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        if (data) {
+                int cpu = iter->cpu;
+                int *depth = &(per_cpu_ptr(data, cpu)->depth);
 
-        /* Cpu */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-                ret = print_graph_cpu(s, cpu);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
+                /*
+                 * Comments display at + 1 to depth. This is the
+                 * return from a function, we now want the comments
+                 * to display at the same level of the bracket.
+                 */
+                *depth = trace->depth - 1;
         }
 
-        /* Proc */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-                ret = print_graph_proc(s, ent->pid);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
-
-                ret = trace_seq_printf(s, " | ");
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        if (print_graph_prologue(iter, s, 0, 0))
+                return TRACE_TYPE_PARTIAL_LINE;
 
         /* Overhead */
         ret = print_graph_overhead(duration, s);
@@ -684,42 +712,21 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
-                   struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+                    struct trace_iterator *iter)
 {
-        int i;
+        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+        struct fgraph_data *data = iter->private;
+        struct trace_event *event;
+        int depth = 0;
         int ret;
-        int cpu = iter->cpu;
-        pid_t *last_pid = iter->private;
-
-        /* Pid */
-        if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
-                return TRACE_TYPE_PARTIAL_LINE;
-
-        /* Absolute time */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-                ret = print_graph_abs_time(iter->ts, s);
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
-
-        /* Cpu */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-                ret = print_graph_cpu(s, cpu);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        int i;
 
-        /* Proc */
-        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-                ret = print_graph_proc(s, ent->pid);
-                if (ret == TRACE_TYPE_PARTIAL_LINE)
-                        return TRACE_TYPE_PARTIAL_LINE;
+        if (data)
+                depth = per_cpu_ptr(data, iter->cpu)->depth;
 
-                ret = trace_seq_printf(s, " | ");
-                if (!ret)
-                        return TRACE_TYPE_PARTIAL_LINE;
-        }
+        if (print_graph_prologue(iter, s, 0, 0))
+                return TRACE_TYPE_PARTIAL_LINE;
 
         /* No overhead */
         ret = print_graph_overhead(-1, s);
@@ -734,8 +741,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
         }
 
         /* Indentation */
-        if (trace->depth > 0)
-                for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+        if (depth > 0)
+                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                         ret = trace_seq_printf(s, " ");
                         if (!ret)
                                 return TRACE_TYPE_PARTIAL_LINE;
@@ -746,9 +753,26 @@
         if (!ret)
                 return TRACE_TYPE_PARTIAL_LINE;
 
-        ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
-        if (!ret)
-                return TRACE_TYPE_PARTIAL_LINE;
+        switch (iter->ent->type) {
+        case TRACE_BPRINT:
+                ret = trace_print_bprintk_msg_only(iter);
+                if (ret != TRACE_TYPE_HANDLED)
+                        return ret;
+                break;
+        case TRACE_PRINT:
+                ret = trace_print_printk_msg_only(iter);
+                if (ret != TRACE_TYPE_HANDLED)
+                        return ret;
+                break;
+        default:
+                event = ftrace_find_event(ent->type);
+                if (!event)
+                        return TRACE_TYPE_UNHANDLED;
+
+                ret = event->trace(iter, sym_flags);
+                if (ret != TRACE_TYPE_HANDLED)
+                        return ret;
+        }
 
         /* Strip ending newline */
         if (s->buffer[s->len - 1] == '\n') {
@@ -767,8 +791,8 @@
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-        struct trace_seq *s = &iter->seq;
         struct trace_entry *entry = iter->ent;
+        struct trace_seq *s = &iter->seq;
 
         switch (entry->type) {
         case TRACE_GRAPH_ENT: {
@@ -781,14 +805,11 @@ print_graph_function(struct trace_iterator *iter)
                 trace_assign_type(field, entry);
                 return print_graph_return(&field->ret, s, entry, iter);
         }
-        case TRACE_BPRINT: {
-                struct bprint_entry *field;
-                trace_assign_type(field, entry);
-                return print_graph_comment(field, s, entry, iter);
-        }
         default:
-                return TRACE_TYPE_UNHANDLED;
+                return print_graph_comment(s, entry, iter);
         }
+
+        return TRACE_TYPE_HANDLED;
 }
 
 static void print_graph_headers(struct seq_file *s)
@@ -820,19 +841,21 @@ static void print_graph_headers(struct seq_file *s)
 
 static void graph_trace_open(struct trace_iterator *iter)
 {
-        /* pid on the last trace processed */
-        pid_t *last_pid = alloc_percpu(pid_t);
+        /* pid and depth on the last trace processed */
+        struct fgraph_data *data = alloc_percpu(struct fgraph_data);
         int cpu;
 
-        if (!last_pid)
+        if (!data)
                 pr_warning("function graph tracer: not enough memory\n");
         else
                 for_each_possible_cpu(cpu) {
-                        pid_t *pid = per_cpu_ptr(last_pid, cpu);
+                        pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+                        int *depth = &(per_cpu_ptr(data, cpu)->depth);
                         *pid = -1;
+                        *depth = 0;
                 }
 
-        iter->private = last_pid;
+        iter->private = data;
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
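The graph tracer now keeps a struct fgraph_data per CPU rather than a bare pid_t, because events from different CPUs interleave in the output and each CPU needs its own notion of the last printed pid and the current call depth. The access pattern, condensed from the hunks above (kernel context, a sketch rather than a verbatim excerpt):

    /* one instance per possible CPU; may fail under memory pressure */
    struct fgraph_data *data = alloc_percpu(struct fgraph_data);
    int cpu;

    if (data) {
            for_each_possible_cpu(cpu) {
                    struct fgraph_data *d = per_cpu_ptr(data, cpu);

                    d->last_pid = -1;       /* no pid printed yet */
                    d->depth = 0;
            }
    }
    /* readers use per_cpu_ptr(data, iter->cpu)->depth;
     * the allocation is released with free_percpu() on close */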
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index f095916e477..8e37fcddd8b 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -359,5 +359,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
 
 int mmio_trace_printk(const char *fmt, va_list args)
 {
-        return trace_vprintk(0, -1, fmt, args);
+        return trace_vprintk(0, fmt, args);
 }
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 6a4c9dea191..19261fdd245 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -19,6 +19,38 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
+enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
+{
+        struct trace_seq *s = &iter->seq;
+        struct trace_entry *entry = iter->ent;
+        struct bprint_entry *field;
+        int ret;
+
+        trace_assign_type(field, entry);
+
+        ret = trace_seq_bprintf(s, field->fmt, field->buf);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
+{
+        struct trace_seq *s = &iter->seq;
+        struct trace_entry *entry = iter->ent;
+        struct print_entry *field;
+        int ret;
+
+        trace_assign_type(field, entry);
+
+        ret = trace_seq_printf(s, "%s", field->buf);
+        if (!ret)
+                return TRACE_TYPE_PARTIAL_LINE;
+
+        return TRACE_TYPE_HANDLED;
+}
+
 /**
  * trace_seq_printf - sequence printing of trace information
  * @s: trace sequence descriptor
@@ -449,6 +481,11 @@ int register_ftrace_event(struct trace_event *event)
 
         mutex_lock(&trace_event_mutex);
 
+        if (!event) {
+                ret = next_event_type++;
+                goto out;
+        }
+
         if (!event->type)
                 event->type = next_event_type++;
         else if (event->type > __TRACE_LAST_TYPE) {
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 3b90e6ade1a..35c422fb51a 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -15,6 +15,11 @@ struct trace_event {
         trace_print_func binary;
 };
 
+extern enum print_line_t
+trace_print_bprintk_msg_only(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_printk_msg_only(struct trace_iterator *iter);
+
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
         __attribute__ ((format (printf, 2, 3)));
 extern int
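The register_ftrace_event() hunk above adds an "allocate only" mode: passing NULL reserves the next event type id without installing any output callbacks. That is what ftrace_init_event_##call in trace_events_stage_3.h relies on; condensed into a standalone kernel-context sketch (illustrative, mirroring the macro body rather than quoting it):

    static int reserve_event_id(struct ftrace_event_call *call)
    {
            int id = register_ftrace_event(NULL);   /* id only, no handlers */

            if (!id)
                    return -ENODEV;
            call->id = id;  /* later exported via events/<name>/id */
            return 0;
    }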
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 486785214e3..eb81556107f 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -112,7 +112,7 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...)
                 return 0;
 
         va_start(ap, fmt);
-        ret = trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
+        ret = trace_vbprintk(ip, fmt, ap);
         va_end(ap);
         return ret;
 }
@@ -126,7 +126,7 @@ int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
         if (!(trace_flags & TRACE_ITER_PRINTK))
                 return 0;
 
-        return trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
+        return trace_vbprintk(ip, fmt, ap);
 }
 EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
 
@@ -139,7 +139,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...)
                 return 0;
 
         va_start(ap, fmt);
-        ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+        ret = trace_vprintk(ip, fmt, ap);
         va_end(ap);
         return ret;
 }
@@ -150,7 +150,7 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
         if (!(trace_flags & TRACE_ITER_PRINTK))
                 return 0;
 
-        return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+        return trace_vprintk(ip, fmt, ap);
 }
 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 38856ba78a9..08f4eb2763d 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -248,6 +248,28 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST 100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+        /* This is harmlessly racy, we want to approximately detect a hang */
+        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+                ftrace_graph_stop();
+                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+                if (ftrace_dump_on_oops)
+                        __ftrace_dump(false);
+                return 0;
+        }
+
+        return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
@@ -259,15 +281,29 @@ trace_selftest_startup_function_graph(struct tracer *trace,
         int ret;
         unsigned long count;
 
-        ret = tracer_init(trace, tr);
+        /*
+         * Simulate the init() callback but we attach a watchdog callback
+         * to detect and recover from possible hangs
+         */
+        tracing_reset_online_cpus(tr);
+        ret = register_ftrace_graph(&trace_graph_return,
+                                    &trace_graph_entry_watchdog);
         if (ret) {
                 warn_failed_init_tracer(trace, ret);
                 goto out;
         }
+        tracing_start_cmdline_record();
 
         /* Sleep for a 1/10 of a second */
         msleep(100);
 
+        /* Have we just recovered from a hang? */
+        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+                tracing_selftest_disabled = true;
+                ret = -1;
+                goto out;
+        }
+
         tracing_stop();
 
         /* check the trace buffer
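The selftest watchdog above is an instance of a generic guard pattern: wrap a hot callback with a cheap, deliberately racy counter and bail out past a threshold instead of letting a broken tracer hang the machine. A standalone userspace sketch of the pattern; the names and threshold are invented for illustration and are not kernel code:

    #include <stdio.h>

    #define MAX_CALLS 1000000u

    static unsigned int calls;      /* unsynchronized on purpose: approximate */

    static int real_callback(int arg)
    {
            return arg + 1;
    }

    static int guarded_callback(int arg)
    {
            if (++calls > MAX_CALLS) {
                    fputs("BUG: callback storm detected\n", stderr);
                    return 0;       /* refuse further work instead of hanging */
            }
            return real_callback(arg);
    }

    int main(void)
    {
            int i, v = 0;

            for (i = 0; i < 5; i++)
                    v = guarded_callback(v);
            printf("v = %d\n", v);
            return 0;
    }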