Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig              |  31
-rw-r--r--  kernel/trace/Makefile             |   6
-rw-r--r--  kernel/trace/ftrace.c             | 642
-rw-r--r--  kernel/trace/ring_buffer.c        | 179
-rw-r--r--  kernel/trace/trace.c              |  82
-rw-r--r--  kernel/trace/trace.h              |  22
-rw-r--r--  kernel/trace/trace_functions.c    |   2
-rw-r--r--  kernel/trace/trace_irqsoff.c      |   4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c |   4
-rw-r--r--  kernel/trace/trace_selftest.c     |  18
-rw-r--r--  kernel/trace/trace_stack.c        |   4
11 files changed, 383 insertions, 611 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 1cb3e1f616a..33dbefd471e 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,13 +1,13 @@
#
-# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+# Architectures that offer a FUNCTION_TRACER implementation should
+# select HAVE_FUNCTION_TRACER:
#
config NOP_TRACER
bool
-config HAVE_FTRACE
+config HAVE_FUNCTION_TRACER
bool
- select NOP_TRACER
config HAVE_DYNAMIC_FTRACE
bool
@@ -25,12 +25,15 @@ config TRACING
bool
select DEBUG_FS
select RING_BUFFER
- select STACKTRACE
+ select STACKTRACE if STACKTRACE_SUPPORT
select TRACEPOINTS
+ select NOP_TRACER
-config FTRACE
+menu "Tracers"
+
+config FUNCTION_TRACER
bool "Kernel Function Tracer"
- depends on HAVE_FTRACE
+ depends on HAVE_FUNCTION_TRACER
depends on DEBUG_KERNEL
select FRAME_POINTER
select TRACING
@@ -49,7 +52,6 @@ config IRQSOFF_TRACER
default n
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
- depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACE_IRQFLAGS
select TRACING
@@ -73,7 +75,6 @@ config PREEMPT_TRACER
default n
depends on GENERIC_TIME
depends on PREEMPT
- depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select TRACER_MAX_TRACE
@@ -101,7 +102,6 @@ config SYSPROF_TRACER
config SCHED_TRACER
bool "Scheduling Latency Tracer"
- depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select CONTEXT_SWITCH_TRACER
@@ -112,7 +112,6 @@ config SCHED_TRACER
config CONTEXT_SWITCH_TRACER
bool "Trace process context switches"
- depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
select MARKERS
@@ -122,9 +121,9 @@ config CONTEXT_SWITCH_TRACER
config BOOT_TRACER
bool "Trace boot initcalls"
- depends on HAVE_FTRACE
depends on DEBUG_KERNEL
select TRACING
+ select CONTEXT_SWITCH_TRACER
help
This tracer helps developers to optimize boot times: it records
the timings of the initcalls and traces key events and the identity
@@ -141,9 +140,9 @@ config BOOT_TRACER
config STACK_TRACER
bool "Trace max stack"
- depends on HAVE_FTRACE
+ depends on HAVE_FUNCTION_TRACER
depends on DEBUG_KERNEL
- select FTRACE
+ select FUNCTION_TRACER
select STACKTRACE
help
This special tracer records the maximum stack footprint of the
@@ -160,7 +159,7 @@ config STACK_TRACER
config DYNAMIC_FTRACE
bool "enable/disable ftrace tracepoints dynamically"
- depends on FTRACE
+ depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
depends on DEBUG_KERNEL
default y
@@ -170,7 +169,7 @@ config DYNAMIC_FTRACE
with a No-Op instruction) as they are called. A table is
created to dynamically enable them again.
- This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+ This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
has native performance as long as no tracing is active.
The changes to the code are done by a kernel thread that
@@ -195,3 +194,5 @@ config FTRACE_STARTUP_TEST
a series of tests are made to verify that the tracer is
functioning properly. It will do tests on all the configured
tracers of ftrace.
+
+endmenu
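
The DYNAMIC_FTRACE help text above describes mcount call sites being patched to NOPs at boot; the actual rewrite lands in ftrace_code_disable() in the ftrace.c hunks below. As a rough, self-contained sketch of the verify-then-replace step (the byte patterns and MCOUNT_INSN_SIZE here are hypothetical stand-ins for the arch-specific sequences that ftrace_nop_replace() and ftrace_call_replace() return):

#include <string.h>

#define MCOUNT_INSN_SIZE 5	/* illustrative x86-like size, not the arch constant */

/* Hypothetical opcode patterns; the real ones come from arch code. */
static const unsigned char call_mcount[MCOUNT_INSN_SIZE] = { 0xe8, 0, 0, 0, 0 };
static const unsigned char nop5[MCOUNT_INSN_SIZE] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/* Verify the expected instruction is still there before replacing it,
 * mirroring the -EINVAL ("failed to modify") case handled in ftrace.c. */
static int modify_code(unsigned char *ip)
{
	if (memcmp(ip, call_mcount, MCOUNT_INSN_SIZE) != 0)
		return -1;	/* unexpected bytes: leave the site untouched */
	memcpy(ip, nop5, MCOUNT_INSN_SIZE);
	return 0;
}

The table the help text mentions is what lets a patched site be flipped back from NOP to a call when tracing is enabled again.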
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index a85dfba88ba..c8228b1a49e 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,7 +1,7 @@
# Do not instrument the tracer itself:
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif
-obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
-obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4dda4f60a2a..e60205722d0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -25,13 +25,24 @@
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
-#include <linux/hash.h>
#include <linux/list.h>
#include <asm/ftrace.h>
#include "trace.h"
+#define FTRACE_WARN_ON(cond) \
+ do { \
+ if (WARN_ON(cond)) \
+ ftrace_kill(); \
+ } while (0)
+
+#define FTRACE_WARN_ON_ONCE(cond) \
+ do { \
+ if (WARN_ON_ONCE(cond)) \
+ ftrace_kill(); \
+ } while (0)
+
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
@@ -153,21 +164,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
}
#ifdef CONFIG_DYNAMIC_FTRACE
-
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
-/*
- * The hash lock is only needed when the recording of the mcount
- * callers are dynamic. That is, by the caller themselves and
- * not recorded via the compilation.
- */
-static DEFINE_SPINLOCK(ftrace_hash_lock);
-#define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
-#define ftrace_hash_unlock(flags) \
- spin_unlock_irqrestore(&ftrace_hash_lock, flags)
-#else
-/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
-#define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
-#define ftrace_hash_unlock(flags) do { } while(0)
+# error Dynamic ftrace depends on MCOUNT_RECORD
#endif
/*
@@ -178,8 +176,6 @@ static DEFINE_SPINLOCK(ftrace_hash_lock);
*/
static unsigned long mcount_addr = MCOUNT_ADDR;
-static struct task_struct *ftraced_task;
-
enum {
FTRACE_ENABLE_CALLS = (1 << 0),
FTRACE_DISABLE_CALLS = (1 << 1),
@@ -189,14 +185,9 @@ enum {
};
static int ftrace_filtered;
-static int tracing_on;
-static int frozen_record_count;
-static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
+static LIST_HEAD(ftrace_new_addrs);
-static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
-
-static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
@@ -214,16 +205,13 @@ struct ftrace_page {
static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;
-static int ftraced_trigger;
-static int ftraced_suspend;
-static int ftraced_stop;
-
-static int ftrace_record_suspend;
-
static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES
+
+static int frozen_record_count;
+
static inline void freeze_record(struct dyn_ftrace *rec)
{
if (!(rec->flags & FTRACE_FL_FROZEN)) {
@@ -250,72 +238,6 @@ static inline int record_frozen(struct dyn_ftrace *rec)
# define record_frozen(rec) ({ 0; })
#endif /* CONFIG_KPROBES */
-int skip_trace(unsigned long ip)
-{
- unsigned long fl;
- struct dyn_ftrace *rec;
- struct hlist_node *t;
- struct hlist_head *head;
-
- if (frozen_record_count == 0)
- return 0;
-
- head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
- hlist_for_each_entry_rcu(rec, t, head, node) {
- if (rec->ip == ip) {
- if (record_frozen(rec)) {
- if (rec->flags & FTRACE_FL_FAILED)
- return 1;
-
- if (!(rec->flags & FTRACE_FL_CONVERTED))
- return 1;
-
- if (!tracing_on || !ftrace_enabled)
- return 1;
-
- if (ftrace_filtered) {
- fl = rec->flags & (FTRACE_FL_FILTER |
- FTRACE_FL_NOTRACE);
- if (!fl || (fl & FTRACE_FL_NOTRACE))
- return 1;
- }
- }
- break;
- }
- }
-
- return 0;
-}
-
-static inline int
-ftrace_ip_in_hash(unsigned long ip, unsigned long key)
-{
- struct dyn_ftrace *p;
- struct hlist_node *t;
- int found = 0;
-
- hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
- if (p->ip == ip) {
- found = 1;
- break;
- }
- }
-
- return found;
-}
-
-static inline void
-ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
-{
- hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
-}
-
-/* called from kstop_machine */
-static inline void ftrace_del_hash(struct dyn_ftrace *node)
-{
- hlist_del(&node->node);
-}
-
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
rec->ip = (unsigned long)ftrace_free_records;
@@ -346,7 +268,6 @@ void ftrace_release(void *start, unsigned long size)
}
}
spin_unlock(&ftrace_lock);
-
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -358,10 +279,8 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
rec = ftrace_free_records;
if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
- WARN_ON_ONCE(1);
+ FTRACE_WARN_ON_ONCE(1);
ftrace_free_records = NULL;
- ftrace_disabled = 1;
- ftrace_enabled = 0;
return NULL;
}
@@ -371,76 +290,36 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
}
if (ftrace_pages->index == ENTRIES_PER_PAGE) {
- if (!ftrace_pages->next)
- return NULL;
+ if (!ftrace_pages->next) {
+ /* allocate another page */
+ ftrace_pages->next =
+ (void *)get_zeroed_page(GFP_KERNEL);
+ if (!ftrace_pages->next)
+ return NULL;
+ }
ftrace_pages = ftrace_pages->next;
}
return &ftrace_pages->records[ftrace_pages->index++];
}
-static void
+static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
- struct dyn_ftrace *node;
- unsigned long flags;
- unsigned long key;
- int resched;
- int cpu;
+ struct dyn_ftrace *rec;
if (!ftrace_enabled || ftrace_disabled)
- return;
-
- resched = need_resched();
- preempt_disable_notrace();
+ return NULL;
- /*
- * We simply need to protect against recursion.
- * Use the the raw version of smp_processor_id and not
- * __get_cpu_var which can call debug hooks that can
- * cause a recursive crash here.
- */
- cpu = raw_smp_processor_id();
- per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
- if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
- goto out;
-
- if (unlikely(ftrace_record_suspend))
- goto out;
-
- key = hash_long(ip, FTRACE_HASHBITS);
-
- WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
-
- if (ftrace_ip_in_hash(ip, key))
- goto out;
-
- ftrace_hash_lock(flags);
-
- /* This ip may have hit the hash before the lock */
- if (ftrace_ip_in_hash(ip, key))
- goto out_unlock;
-
- node = ftrace_alloc_dyn_node(ip);
- if (!node)
- goto out_unlock;
-
- node->ip = ip;
-
- ftrace_add_hash(node, key);
+ rec = ftrace_alloc_dyn_node(ip);
+ if (!rec)
+ return NULL;
- ftraced_trigger = 1;
+ rec->ip = ip;
- out_unlock:
- ftrace_hash_unlock(flags);
- out:
- per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+ list_add(&rec->list, &ftrace_new_addrs);
- /* prevent recursion with scheduler */
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ return rec;
}
#define FTRACE_ADDR ((long)(ftrace_caller))
@@ -559,7 +438,6 @@ static void ftrace_replace_code(int enable)
rec->flags |= FTRACE_FL_FAILED;
if ((system_state == SYSTEM_BOOTING) ||
!core_kernel_text(rec->ip)) {
- ftrace_del_hash(rec);
ftrace_free_rec(rec);
}
}
@@ -567,15 +445,6 @@ static void ftrace_replace_code(int enable)
}
}
-static void ftrace_shutdown_replenish(void)
-{
- if (ftrace_pages->next)
- return;
-
- /* allocate another page */
- ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
-}
-
static void print_ip_ins(const char *fmt, unsigned char *p)
{
int i;
@@ -591,23 +460,23 @@ ftrace_code_disable(struct dyn_ftrace *rec)
{
unsigned long ip;
unsigned char *nop, *call;
- int failed;
+ int ret;
ip = rec->ip;
nop = ftrace_nop_replace();
call = ftrace_call_replace(ip, mcount_addr);
- failed = ftrace_modify_code(ip, call, nop);
- if (failed) {
- switch (failed) {
- case 1:
- WARN_ON_ONCE(1);
+ ret = ftrace_modify_code(ip, call, nop);
+ if (ret) {
+ switch (ret) {
+ case -EFAULT:
+ FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace faulted on modifying ");
print_ip_sym(ip);
break;
- case 2:
- WARN_ON_ONCE(1);
+ case -EINVAL:
+ FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace failed to modify ");
print_ip_sym(ip);
print_ip_ins(" expected: ", call);
@@ -615,6 +484,15 @@ ftrace_code_disable(struct dyn_ftrace *rec)
print_ip_ins(" replace: ", nop);
printk(KERN_CONT "\n");
break;
+ case -EPERM:
+ FTRACE_WARN_ON_ONCE(1);
+ pr_info("ftrace faulted on writing ");
+ print_ip_sym(ip);
+ break;
+ default:
+ FTRACE_WARN_ON_ONCE(1);
+ pr_info("ftrace faulted on unknown error ");
+ print_ip_sym(ip);
}
rec->flags |= FTRACE_FL_FAILED;
@@ -623,37 +501,18 @@ ftrace_code_disable(struct dyn_ftrace *rec)
return 1;
}
-static int __ftrace_update_code(void *ignore);
-
static int __ftrace_modify_code(void *data)
{
- unsigned long addr;
int *command = data;
- if (*command & FTRACE_ENABLE_CALLS) {
- /*
- * Update any recorded ips now that we have the
- * machine stopped
- */
- __ftrace_update_code(NULL);
+ if (*command & FTRACE_ENABLE_CALLS)
ftrace_replace_code(1);
- tracing_on = 1;
- } else if (*command & FTRACE_DISABLE_CALLS) {
+ else if (*command & FTRACE_DISABLE_CALLS)
ftrace_replace_code(0);
- tracing_on = 0;
- }
if (*command & FTRACE_UPDATE_TRACE_FUNC)
ftrace_update_ftrace_func(ftrace_trace_function);
- if (*command & FTRACE_ENABLE_MCOUNT) {
- addr = (unsigned long)ftrace_record_ip;
- ftrace_mcount_set(&addr);
- } else if (*command & FTRACE_DISABLE_MCOUNT) {
- addr = (unsigned long)ftrace_stub;
- ftrace_mcount_set(&addr);
- }
-
return 0;
}
@@ -662,26 +521,9 @@ static void ftrace_run_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
-void ftrace_disable_daemon(void)
-{
- /* Stop the daemon from calling kstop_machine */
- mutex_lock(&ftraced_lock);
- ftraced_stop = 1;
- mutex_unlock(&ftraced_lock);
-
- ftrace_force_update();
-}
-
-void ftrace_enable_daemon(void)
-{
- mutex_lock(&ftraced_lock);
- ftraced_stop = 0;
- mutex_unlock(&ftraced_lock);
-
- ftrace_force_update();
-}
-
static ftrace_func_t saved_ftrace_func;
+static int ftrace_start;
+static DEFINE_MUTEX(ftrace_start_lock);
static void ftrace_startup(void)
{
@@ -690,9 +532,9 @@ static void ftrace_startup(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftraced_lock);
- ftraced_suspend++;
- if (ftraced_suspend == 1)
+ mutex_lock(&ftrace_start_lock);
+ ftrace_start++;
+ if (ftrace_start == 1)
command |= FTRACE_ENABLE_CALLS;
if (saved_ftrace_func != ftrace_trace_function) {
@@ -705,7 +547,7 @@ static void ftrace_startup(void)
ftrace_run_update_code(command);
out:
- mutex_unlock(&ftraced_lock);
+ mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(void)
@@ -715,9 +557,9 @@ static void ftrace_shutdown(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftraced_lock);
- ftraced_suspend--;
- if (!ftraced_suspend)
+ mutex_lock(&ftrace_start_lock);
+ ftrace_start--;
+ if (!ftrace_start)
command |= FTRACE_DISABLE_CALLS;
if (saved_ftrace_func != ftrace_trace_function) {
@@ -730,7 +572,7 @@ static void ftrace_shutdown(void)
ftrace_run_update_code(command);
out:
- mutex_unlock(&ftraced_lock);
+ mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
@@ -740,15 +582,15 @@ static void ftrace_startup_sysctl(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftraced_lock);
+ mutex_lock(&ftrace_start_lock);
/* Force update next time */
saved_ftrace_func = NULL;
- /* ftraced_suspend is true if we want ftrace running */
- if (ftraced_suspend)
+ /* ftrace_start is true if we want ftrace running */
+ if (ftrace_start)
command |= FTRACE_ENABLE_CALLS;
ftrace_run_update_code(command);
- mutex_unlock(&ftraced_lock);
+ mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown_sysctl(void)
@@ -758,112 +600,50 @@ static void ftrace_shutdown_sysctl(void)
if (unlikely(ftrace_disabled))
return;
- mutex_lock(&ftraced_lock);
- /* ftraced_suspend is true if ftrace is running */
- if (ftraced_suspend)
+ mutex_lock(&ftrace_start_lock);
+ /* ftrace_start is true if ftrace is running */
+ if (ftrace_start)
command |= FTRACE_DISABLE_CALLS;
ftrace_run_update_code(command);
- mutex_unlock(&ftraced_lock);
+ mutex_unlock(&ftrace_start_lock);
}
static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int __ftrace_update_code(void *ignore)
+static int ftrace_update_code(void)
{
- int i, save_ftrace_enabled;
+ struct dyn_ftrace *p, *t;
cycle_t start, stop;
- struct dyn_ftrace *p;
- struct hlist_node *t, *n;
- struct hlist_head *head, temp_list;
-
- /* Don't be recording funcs now */
- ftrace_record_suspend++;
- save_ftrace_enabled = ftrace_enabled;
- ftrace_enabled = 0;
start = ftrace_now(raw_smp_processor_id());
ftrace_update_cnt = 0;
- /* No locks needed, the machine is stopped! */
- for (i = 0; i < FTRACE_HASHSIZE; i++) {
- INIT_HLIST_HEAD(&temp_list);
- head = &ftrace_hash[i];
-
- /* all CPUS are stopped, we are safe to modify code */
- hlist_for_each_entry_safe(p, t, n, head, node) {
- /* Skip over failed records which have not been
- * freed. */
- if (p->flags & FTRACE_FL_FAILED)
- continue;
-
- /* Unconverted records are always at the head of the
- * hash bucket. Once we encounter a converted record,
- * simply skip over to the next bucket. Saves ftraced
- * some processor cycles (ftrace does its bid for
- * global warming :-p ). */
- if (p->flags & (FTRACE_FL_CONVERTED))
- break;
+ list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
- /* Ignore updates to this record's mcount site.
- * Reintroduce this record at the head of this
- * bucket to attempt to "convert" it again if
- * the kprobe on it is unregistered before the
- * next run. */
- if (get_kprobe((void *)p->ip)) {
- ftrace_del_hash(p);
- INIT_HLIST_NODE(&p->node);
- hlist_add_head(&p->node, &temp_list);
- freeze_record(p);
- continue;
- } else {
- unfreeze_record(p);
- }
+ /* If something went wrong, bail without enabling anything */
+ if (unlikely(ftrace_disabled))
+ return -1;
- /* convert record (i.e, patch mcount-call with NOP) */
- if (ftrace_code_disable(p)) {
- p->flags |= FTRACE_FL_CONVERTED;
- ftrace_update_cnt++;
- } else {
- if ((system_state == SYSTEM_BOOTING) ||
- !core_kernel_text(p->ip)) {
- ftrace_del_hash(p);
- ftrace_free_rec(p);
- }
- }
- }
+ list_del_init(&p->list);
- hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
- hlist_del(&p->node);
- INIT_HLIST_NODE(&p->node);
- hlist_add_head(&p->node, head);
- }
+ /* convert record (i.e., patch mcount-call with NOP) */
+ if (ftrace_code_disable(p)) {
+ p->flags |= FTRACE_FL_CONVERTED;
+ ftrace_update_cnt++;
+ } else
+ ftrace_free_rec(p);
}
stop = ftrace_now(raw_smp_processor_id());
ftrace_update_time = stop - start;
ftrace_update_tot_cnt += ftrace_update_cnt;
- ftraced_trigger = 0;
-
- ftrace_enabled = save_ftrace_enabled;
- ftrace_record_suspend--;
return 0;
}
-static int ftrace_update_code(void)
-{
- if (unlikely(ftrace_disabled) ||
- !ftrace_enabled || !ftraced_trigger)
- return 0;
-
- stop_machine(__ftrace_update_code, NULL, NULL);
-
- return 1;
-}
-
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
struct ftrace_page *pg;
@@ -892,8 +672,8 @@ static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
pg = ftrace_pages = ftrace_pages_start;
cnt = num_to_init / ENTRIES_PER_PAGE;
- pr_info("ftrace: allocating %ld hash entries in %d pages\n",
- num_to_init, cnt);
+ pr_info("ftrace: allocating %ld entries in %d pages\n",
+ num_to_init, cnt + 1);
for (i = 0; i < cnt; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -973,13 +753,11 @@ static void *t_start(struct seq_file *m, loff_t *pos)
void *p = NULL;
loff_t l = -1;
- if (*pos != iter->pos) {
- for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
- ;
- } else {
- l = *pos;
- p = t_next(m, p, &l);
- }
+ if (*pos > iter->pos)
+ *pos = iter->pos;
+
+ l = *pos;
+ p = t_next(m, p, &l);
return p;
}
@@ -990,15 +768,21 @@ static void t_stop(struct seq_file *m, void *p)
static int t_show(struct seq_file *m, void *v)
{
+ struct ftrace_iterator *iter = m->private;
struct dyn_ftrace *rec = v;
char str[KSYM_SYMBOL_LEN];
+ int ret = 0;
if (!rec)
return 0;
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
- seq_printf(m, "%s\n", str);
+ ret = seq_printf(m, "%s\n", str);
+ if (ret < 0) {
+ iter->pos--;
+ iter->idx--;
+ }
return 0;
}
@@ -1024,7 +808,7 @@ ftrace_avail_open(struct inode *inode, struct file *file)
return -ENOMEM;
iter->pg = ftrace_pages_start;
- iter->pos = -1;
+ iter->pos = 0;
ret = seq_open(file, &show_ftrace_seq_ops);
if (!ret) {
@@ -1111,7 +895,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
if (file->f_mode & FMODE_READ) {
iter->pg = ftrace_pages_start;
- iter->pos = -1;
+ iter->pos = 0;
iter->flags = enable ? FTRACE_ITER_FILTER :
FTRACE_ITER_NOTRACE;
@@ -1401,10 +1185,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
}
mutex_lock(&ftrace_sysctl_lock);
- mutex_lock(&ftraced_lock);
- if (iter->filtered && ftraced_suspend && ftrace_enabled)
+ mutex_lock(&ftrace_start_lock);
+ if (iter->filtered && ftrace_start && ftrace_enabled)
ftrace_run_update_code(FTRACE_ENABLE_CALLS);
- mutex_unlock(&ftraced_lock);
+ mutex_unlock(&ftrace_start_lock);
mutex_unlock(&ftrace_sysctl_lock);
kfree(iter);
@@ -1424,55 +1208,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
return ftrace_regex_release(inode, file, 0);
}
-static ssize_t
-ftraced_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- /* don't worry about races */
- char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
- int r = strlen(buf);
-
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-ftraced_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[64];
- long val;
- int ret;
-
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- if (strncmp(buf, "enable", 6) == 0)
- val = 1;
- else if (strncmp(buf, "disable", 7) == 0)
- val = 0;
- else {
- buf[cnt] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
- return ret;
-
- val = !!val;
- }
-
- if (val)
- ftrace_enable_daemon();
- else
- ftrace_disable_daemon();
-
- filp->f_pos += cnt;
-
- return cnt;
-}
-
static struct file_operations ftrace_avail_fops = {
.open = ftrace_avail_open,
.read = seq_read,
@@ -1503,54 +1238,6 @@ static struct file_operations ftrace_notrace_fops = {
.release = ftrace_notrace_release,
};
-static struct file_operations ftraced_fops = {
- .open = tracing_open_generic,
- .read = ftraced_read,
- .write = ftraced_write,
-};
-
-/**
- * ftrace_force_update - force an update to all recording ftrace functions
- */
-int ftrace_force_update(void)
-{
- int ret = 0;
-
- if (unlikely(ftrace_disabled))
- return -ENODEV;
-
- mutex_lock(&ftrace_sysctl_lock);
- mutex_lock(&ftraced_lock);
-
- /*
- * If ftraced_trigger is not set, then there is nothing
- * to update.
- */
- if (ftraced_trigger && !ftrace_update_code())
- ret = -EBUSY;
-
- mutex_unlock(&ftraced_lock);
- mutex_unlock(&ftrace_sysctl_lock);
-
- return ret;
-}
-
-static void ftrace_force_shutdown(void)
-{
- struct task_struct *task;
- int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
-
- mutex_lock(&ftraced_lock);
- task = ftraced_task;
- ftraced_task = NULL;
- ftraced_suspend = -1;
- ftrace_run_update_code(command);
- mutex_unlock(&ftraced_lock);
-
- if (task)
- kthread_stop(task);
-}
-
static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
@@ -1581,17 +1268,11 @@ static __init int ftrace_init_debugfs(void)
pr_warning("Could not create debugfs "
"'set_ftrace_notrace' entry\n");
- entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
- NULL, &ftraced_fops);
- if (!entry)
- pr_warning("Could not create debugfs "
- "'ftraced_enabled' entry\n");
return 0;
}
fs_initcall(ftrace_init_debugfs);
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
unsigned long *end)
{
@@ -1599,20 +1280,18 @@ static int ftrace_convert_nops(unsigned long *start,
unsigned long addr;
unsigned long flags;
+ mutex_lock(&ftrace_start_lock);
p = start;
while (p < end) {
addr = ftrace_call_adjust(*p++);
- /* should not be called from interrupt context */
- spin_lock(&ftrace_lock);
ftrace_record_ip(addr);
- spin_unlock(&ftrace_lock);
- ftrace_shutdown_replenish();
}
- /* p is ignored */
+ /* disable interrupts to prevent kstop machine */
local_irq_save(flags);
- __ftrace_update_code(p);
+ ftrace_update_code();
local_irq_restore(flags);
+ mutex_unlock(&ftrace_start_lock);
return 0;
}
@@ -1658,130 +1337,34 @@ void __init ftrace_init(void)
failed:
ftrace_disabled = 1;
}
-#else /* CONFIG_FTRACE_MCOUNT_RECORD */
-static int ftraced(void *ignore)
-{
- unsigned long usecs;
-
- while (!kthread_should_stop()) {
-
- set_current_state(TASK_INTERRUPTIBLE);
- /* check once a second */
- schedule_timeout(HZ);
-
- if (unlikely(ftrace_disabled))
- continue;
-
- mutex_lock(&ftrace_sysctl_lock);
- mutex_lock(&ftraced_lock);
- if (!ftraced_suspend && !ftraced_stop &&
- ftrace_update_code()) {
- usecs = nsecs_to_usecs(ftrace_update_time);
- if (ftrace_update_tot_cnt > 100000) {
- ftrace_update_tot_cnt = 0;
- pr_info("hm, dftrace overflow: %lu change%s"
- " (%lu total) in %lu usec%s\n",
- ftrace_update_cnt,
- ftrace_update_cnt != 1 ? "s" : "",
- ftrace_update_tot_cnt,
- usecs, usecs != 1 ? "s" : "");
- ftrace_disabled = 1;
- WARN_ON_ONCE(1);
- }
- }
- mutex_unlock(&ftraced_lock);
- mutex_unlock(&ftrace_sysctl_lock);
-
- ftrace_shutdown_replenish();
- }
- __set_current_state(TASK_RUNNING);
- return 0;
-}
+#else
-static int __init ftrace_dynamic_init(void)
+static int __init ftrace_nodyn_init(void)
{
- struct task_struct *p;
- unsigned long addr;
- int ret;
-
- addr = (unsigned long)ftrace_record_ip;
-
- stop_machine(ftrace_dyn_arch_init, &addr, NULL);
-
- /* ftrace_dyn_arch_init places the return code in addr */
- if (addr) {
- ret = (int)addr;
- goto failed;
- }
-
- ret = ftrace_dyn_table_alloc(NR_TO_INIT);
- if (ret)
- goto failed;
-
- p = kthread_run(ftraced, NULL, "ftraced");
- if (IS_ERR(p)) {
- ret = -1;
- goto failed;
- }
-
- last_ftrace_enabled = ftrace_enabled = 1;
- ftraced_task = p;
-
+ ftrace_enabled = 1;
return 0;
-
- failed:
- ftrace_disabled = 1;
- return ret;
}
+device_initcall(ftrace_nodyn_init);
-core_initcall(ftrace_dynamic_init);
-#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
-
-#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
-# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
- * ftrace_kill_atomic - kill ftrace from critical sections
+ * ftrace_kill - kill ftrace
*
* This function should be used by panic code. It stops ftrace
 * but in a not so nice way: no locks are taken and tracing is
 * disabled permanently.
*/
-void ftrace_kill_atomic(void)
-{
- ftrace_disabled = 1;
- ftrace_enabled = 0;
-#ifdef CONFIG_DYNAMIC_FTRACE
- ftraced_suspend = -1;
-#endif
- clear_ftrace_function();
-}
-
-/**
- * ftrace_kill - totally shutdown ftrace
- *
- * This is a safety measure. If something was detected that seems
- * wrong, calling this function will keep ftrace from doing
- * any more modifications, and updates.
- * used when something went wrong.
- */
void ftrace_kill(void)
{
- mutex_lock(&ftrace_sysctl_lock);
ftrace_disabled = 1;
ftrace_enabled = 0;
-
clear_ftrace_function();
- mutex_unlock(&ftrace_sysctl_lock);
-
- /* Try to totally disable ftrace */
- ftrace_force_shutdown();
}
/**
@@ -1870,3 +1453,4 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
mutex_unlock(&ftrace_sysctl_lock);
return ret;
}
+
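
Net effect of the ftrace.c changes: the polling ftraced daemon, its hash table, and the skip_trace() lookup are gone. ftrace_record_ip() now just queues each mcount call site on the ftrace_new_addrs list, and ftrace_update_code() drains that list once, converting each record a single time. A simplified sketch of that queue-then-drain flow, with plain pointers standing in for list_head and convert() standing in for ftrace_code_disable():

struct dyn_rec {
	unsigned long ip;
	struct dyn_rec *next;
};

static struct dyn_rec *new_addrs;	/* stands in for ftrace_new_addrs */

/* Record a call site: push it on the pending list, nothing more. */
static void record_ip(struct dyn_rec *rec, unsigned long ip)
{
	rec->ip = ip;
	rec->next = new_addrs;
	new_addrs = rec;
}

/* Drain the pending list; each record is converted exactly once. */
static int update_code(int (*convert)(struct dyn_rec *))
{
	struct dyn_rec *p;
	int cnt = 0;

	while ((p = new_addrs) != NULL) {
		new_addrs = p->next;
		if (convert(p))
			cnt++;	/* patched to NOP, record kept */
		/* else the real code frees the record via ftrace_free_rec() */
	}
	return cnt;
}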
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 94af1fe56bb..036456cbb4f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -16,14 +16,49 @@
#include <linux/list.h>
#include <linux/fs.h>
+#include "trace.h"
+
+/* Global flag to disable all recording to ring buffers */
+static int ring_buffers_off __read_mostly;
+
+/**
+ * tracing_on - enable all tracing buffers
+ *
+ * This function enables all tracing buffers that may have been
+ * disabled with tracing_off.
+ */
+void tracing_on(void)
+{
+ ring_buffers_off = 0;
+}
+
+/**
+ * tracing_off - turn off all tracing buffers
+ *
+ * This function stops all tracing buffers from recording data.
+ * It does not disable any overhead the tracers themselves may
+ * be causing. This function simply causes all recording to
+ * the ring buffers to fail.
+ */
+void tracing_off(void)
+{
+ ring_buffers_off = 1;
+}
+
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
+ u64 time;
+
+ preempt_disable_notrace();
/* shift to debug/test normalization and TIME_EXTENTS */
- return sched_clock() << DEBUG_SHIFT;
+ time = sched_clock() << DEBUG_SHIFT;
+ preempt_enable_notrace();
+
+ return time;
}
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
@@ -130,7 +165,7 @@ struct buffer_page {
static inline void free_buffer_page(struct buffer_page *bpage)
{
if (bpage->page)
- __free_page(bpage->page);
+ free_page((unsigned long)bpage->page);
kfree(bpage);
}
@@ -503,6 +538,12 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
LIST_HEAD(pages);
int i, cpu;
+ /*
+ * Always succeed at resizing a non-existent buffer:
+ */
+ if (!buffer)
+ return size;
+
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
size *= BUF_PAGE_SIZE;
buffer_size = buffer->pages * BUF_PAGE_SIZE;
@@ -966,7 +1007,9 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
if (unlikely(*delta > (1ULL << 59) && !once++)) {
printk(KERN_WARNING "Delta way too big! %llu"
" ts=%llu write stamp = %llu\n",
- *delta, *ts, cpu_buffer->write_stamp);
+ (unsigned long long)*delta,
+ (unsigned long long)*ts,
+ (unsigned long long)cpu_buffer->write_stamp);
WARN_ON(1);
}
@@ -1020,8 +1063,23 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event;
u64 ts, delta;
int commit = 0;
+ int nr_loops = 0;
again:
+ /*
+ * We allow for interrupts to reenter here and do a trace.
+ * If one does, it will cause this original code to loop
+ * back here. Even with heavy interrupts happening, this
+ * should only happen a few times in a row. If this happens
+ * 1000 times in a row, there must be either an interrupt
+ * storm or we have something buggy.
+ * Bail!
+ */
+ if (unlikely(++nr_loops > 1000)) {
+ RB_WARN_ON(cpu_buffer, 1);
+ return NULL;
+ }
+
ts = ring_buffer_time_stamp(cpu_buffer->cpu);
/*
@@ -1043,7 +1101,7 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
/* Did the write stamp get updated already? */
if (unlikely(ts < cpu_buffer->write_stamp))
- goto again;
+ delta = 0;
if (test_time_stamp(delta)) {
@@ -1116,6 +1174,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
struct ring_buffer_event *event;
int cpu, resched;
+ if (ring_buffers_off)
+ return NULL;
+
if (atomic_read(&buffer->record_disabled))
return NULL;
@@ -1232,6 +1293,9 @@ int ring_buffer_write(struct ring_buffer *buffer,
int ret = -EBUSY;
int cpu, resched;
+ if (ring_buffers_off)
+ return -EBUSY;
+
if (atomic_read(&buffer->record_disabled))
return -EBUSY;
@@ -1530,10 +1594,23 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = NULL;
unsigned long flags;
+ int nr_loops = 0;
spin_lock_irqsave(&cpu_buffer->lock, flags);
again:
+ /*
+ * This should normally only loop twice. But because the
+ * start of the reader inserts an empty page, it causes
+ * a case where we will loop three times. There should be no
+ * reason to loop four times (that I know of).
+ */
+ if (unlikely(++nr_loops > 3)) {
+ RB_WARN_ON(cpu_buffer, 1);
+ reader = NULL;
+ goto out;
+ }
+
reader = cpu_buffer->reader_page;
/* If there's more to read, return this page */
@@ -1663,6 +1740,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
struct buffer_page *reader;
+ int nr_loops = 0;
if (!cpu_isset(cpu, buffer->cpumask))
return NULL;
@@ -1670,6 +1748,19 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
cpu_buffer = buffer->buffers[cpu];
again:
+ /*
+ * We repeat when a timestamp is encountered. It is possible
+ * to get multiple timestamps from an interrupt entering just
+ * as one timestamp is about to be written. The max times
+ * that this can happen is the number of nested interrupts we
+ * can have. Nesting 10 deep of interrupts is clearly
+ * an anomaly.
+ */
+ if (unlikely(++nr_loops > 10)) {
+ RB_WARN_ON(cpu_buffer, 1);
+ return NULL;
+ }
+
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
return NULL;
@@ -1720,6 +1811,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
+ int nr_loops = 0;
if (ring_buffer_iter_empty(iter))
return NULL;
@@ -1728,6 +1820,19 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
buffer = cpu_buffer->buffer;
again:
+ /*
+ * We repeat when a timestamp is encountered. It is possible
+ * to get multiple timestamps from an interrupt entering just
+ * as one timestamp is about to be written. The max times
+ * that this can happen is the number of nested interrupts we
+ * can have. Nesting 10 deep of interrupts is clearly
+ * an anomaly.
+ */
+ if (unlikely(++nr_loops > 10)) {
+ RB_WARN_ON(cpu_buffer, 1);
+ return NULL;
+ }
+
if (rb_per_cpu_empty(cpu_buffer))
return NULL;
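
The four guards added above all use the same bounded-retry idiom: the loop is expected to repeat only when an interrupt sneaks in mid-operation, so if the retry count ever exceeds a generous bound, the path warns and bails rather than spinning forever. Reduced to its shape (bounds are 1000 in rb_reserve_next_event(), 3 in rb_get_reader_page(), 10 in the two peek paths; attempt() is a hypothetical stand-in for the guarded work):

/* Returns nonzero while an interrupt invalidated the attempt. */
static int attempt(void);

static int bounded_retry(int bound)
{
	int nr_loops = 0;

	while (attempt()) {
		if (++nr_loops > bound)
			return -1;	/* storm or bug: give up, warn upstream */
	}
	return 0;
}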
@@ -2012,3 +2117,69 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
return 0;
}
+static ssize_t
+rb_simple_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ int *p = filp->private_data;
+ char buf[64];
+ int r;
+
+ /* !ring_buffers_off == tracing_on */
+ r = sprintf(buf, "%d\n", !*p);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+rb_simple_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ int *p = filp->private_data;
+ char buf[64];
+ long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ /* !ring_buffers_off == tracing_on */
+ *p = !val;
+
+ (*ppos)++;
+
+ return cnt;
+}
+
+static struct file_operations rb_simple_fops = {
+ .open = tracing_open_generic,
+ .read = rb_simple_read,
+ .write = rb_simple_write,
+};
+
+
+static __init int rb_init_debugfs(void)
+{
+ struct dentry *d_tracer;
+ struct dentry *entry;
+
+ d_tracer = tracing_init_dentry();
+
+ entry = debugfs_create_file("tracing_on", 0644, d_tracer,
+ &ring_buffers_off, &rb_simple_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'tracing_on' entry\n");
+
+ return 0;
+}
+
+fs_initcall(rb_init_debugfs);
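
tracing_on()/tracing_off() give kernel code a cheap global kill switch for recording: the ring_buffers_off flag is checked at the top of ring_buffer_lock_reserve() and ring_buffer_write(), and rb_simple_read()/rb_simple_write() expose the same switch to userspace as a debugfs "tracing_on" file. A minimal sketch of the intended in-kernel use, with suspect_condition() as a hypothetical detector:

/* Freeze all ring-buffer recording the instant something looks
 * wrong, so the events leading up to the condition are preserved
 * instead of being overwritten while the system keeps running. */
static void check_for_trouble(void)
{
	if (suspect_condition())	/* hypothetical predicate */
		tracing_off();
}

From userspace, writing 0 to the debugfs file flips the same flag; writing 1, like tracing_on(), resumes recording.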
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d345d649d07..697eda36b86 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -34,6 +34,7 @@
#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
+#include <linux/irqflags.h>
#include "trace.h"
@@ -655,7 +656,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+#else
+ TRACE_FLAG_IRQS_NOSUPPORT |
+#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -700,6 +705,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
unsigned long flags,
int skip, int pc)
{
+#ifdef CONFIG_STACKTRACE
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
@@ -725,6 +731,7 @@ static void ftrace_trace_stack(struct trace_array *tr,
save_stack_trace(&trace);
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
}
void __trace_stack(struct trace_array *tr,
@@ -851,7 +858,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
preempt_enable_notrace();
}
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
@@ -865,9 +872,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
if (unlikely(!ftrace_function_enabled))
return;
- if (skip_trace(ip))
- return;
-
pc = preempt_count();
resched = need_resched();
preempt_disable_notrace();
@@ -1084,17 +1088,20 @@ static void s_stop(struct seq_file *m, void *p)
mutex_unlock(&trace_types_lock);
}
-#define KRETPROBE_MSG "[unknown/kretprobe'd]"
-
#ifdef CONFIG_KRETPROBES
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
{
- return addr == (unsigned long)kretprobe_trampoline;
+ static const char tramp_name[] = "kretprobe_trampoline";
+ int size = sizeof(tramp_name);
+
+ if (strncmp(tramp_name, name, size) == 0)
+ return "[unknown/kretprobe'd]";
+ return name;
}
#else
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
{
- return 0;
+ return name;
}
#endif /* CONFIG_KRETPROBES */
@@ -1103,10 +1110,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
char str[KSYM_SYMBOL_LEN];
+ const char *name;
kallsyms_lookup(address, NULL, NULL, NULL, str);
- return trace_seq_printf(s, fmt, str);
+ name = kretprobed(str);
+
+ return trace_seq_printf(s, fmt, name);
#endif
return 1;
}
@@ -1117,9 +1127,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
{
#ifdef CONFIG_KALLSYMS
char str[KSYM_SYMBOL_LEN];
+ const char *name;
sprint_symbol(str, address);
- return trace_seq_printf(s, fmt, str);
+ name = kretprobed(str);
+
+ return trace_seq_printf(s, fmt, name);
#endif
return 1;
}
@@ -1246,7 +1259,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
trace_seq_printf(s, "%3d", cpu);
trace_seq_printf(s, "%c%c",
- (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
+ (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+ (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
@@ -1372,10 +1386,7 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
seq_print_ip_sym(s, field->ip, sym_flags);
trace_seq_puts(s, " (");
- if (kretprobed(field->parent_ip))
- trace_seq_puts(s, KRETPROBE_MSG);
- else
- seq_print_ip_sym(s, field->parent_ip, sym_flags);
+ seq_print_ip_sym(s, field->parent_ip, sym_flags);
trace_seq_puts(s, ")\n");
break;
}
@@ -1491,12 +1502,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
ret = trace_seq_printf(s, " <-");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- if (kretprobed(field->parent_ip))
- ret = trace_seq_puts(s, KRETPROBE_MSG);
- else
- ret = seq_print_ip_sym(s,
- field->parent_ip,
- sym_flags);
+ ret = seq_print_ip_sym(s,
+ field->parent_ip,
+ sym_flags);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
@@ -1747,7 +1755,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
return TRACE_TYPE_HANDLED;
SEQ_PUT_FIELD_RET(s, entry->pid);
- SEQ_PUT_FIELD_RET(s, iter->cpu);
+ SEQ_PUT_FIELD_RET(s, entry->cpu);
SEQ_PUT_FIELD_RET(s, iter->ts);
switch (entry->type) {
@@ -2379,9 +2387,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
int i;
size_t ret;
+ ret = cnt;
+
if (cnt > max_tracer_type_len)
cnt = max_tracer_type_len;
- ret = cnt;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
@@ -2414,8 +2423,8 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
out:
mutex_unlock(&trace_types_lock);
- if (ret == cnt)
- filp->f_pos += cnt;
+ if (ret > 0)
+ filp->f_pos += ret;
return ret;
}
@@ -2667,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
{
unsigned long val;
char buf[64];
- int ret;
+ int ret, cpu;
struct trace_array *tr = filp->private_data;
if (cnt >= sizeof(buf))
@@ -2695,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
goto out;
}
+ /* disable all cpu buffers */
+ for_each_tracing_cpu(cpu) {
+ if (global_trace.data[cpu])
+ atomic_inc(&global_trace.data[cpu]->disabled);
+ if (max_tr.data[cpu])
+ atomic_inc(&max_tr.data[cpu]->disabled);
+ }
+
if (val != global_trace.entries) {
ret = ring_buffer_resize(global_trace.buffer, val);
if (ret < 0) {
@@ -2726,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
if (tracing_disabled)
cnt = -ENOMEM;
out:
+ for_each_tracing_cpu(cpu) {
+ if (global_trace.data[cpu])
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ if (max_tr.data[cpu])
+ atomic_dec(&max_tr.data[cpu]->disabled);
+ }
+
max_tr.entries = global_trace.entries;
mutex_unlock(&trace_types_lock);
@@ -3097,7 +3121,7 @@ void ftrace_dump(void)
dump_ran = 1;
/* No turning back! */
- ftrace_kill_atomic();
+ ftrace_kill();
for_each_tracing_cpu(cpu) {
atomic_inc(&global_trace.data[cpu]->disabled);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f1f99572cde..8465ad05270 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -120,18 +120,20 @@ struct trace_boot {
/*
* trace_flag_type is an enumeration that holds different
* states when a trace occurs. These are:
- * IRQS_OFF - interrupts were disabled
- * NEED_RESCED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
- * CONT - multiple entries hold the trace item
+ * IRQS_OFF - interrupts were disabled
+ * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
+ * NEED_RESCED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
+ * CONT - multiple entries hold the trace item
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_NEED_RESCHED = 0x02,
- TRACE_FLAG_HARDIRQ = 0x04,
- TRACE_FLAG_SOFTIRQ = 0x08,
- TRACE_FLAG_CONT = 0x10,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_CONT = 0x20,
};
#define TRACE_BUF_SIZE 1024
@@ -335,7 +337,7 @@ void update_max_tr_single(struct trace_array *tr,
extern cycle_t ftrace_now(int cpu);
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
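
The renumbered bits pair with the lat_print_generic() change in trace.c above: an arch without CONFIG_TRACE_IRQFLAGS_SUPPORT now tags every entry with IRQS_NOSUPPORT instead of silently reporting interrupts as enabled. A small sketch decoding the latency-format irq column from the new layout, matching the characters printed there:

/* 'd' = irqs were off, 'X' = arch cannot tell, '.' = irqs on. */
static char irq_flag_char(unsigned char flags)
{
	if (flags & TRACE_FLAG_IRQS_OFF)	/* 0x01 */
		return 'd';
	if (flags & TRACE_FLAG_IRQS_NOSUPPORT)	/* 0x02 */
		return 'X';
	return '.';
}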
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e90eb0c2c56..0f85a64003d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -64,7 +64,7 @@ static void function_trace_ctrl_update(struct trace_array *tr)
static struct tracer function_trace __read_mostly =
{
- .name = "ftrace",
+ .name = "function",
.init = function_trace_init,
.reset = function_trace_reset,
.ctrl_update = function_trace_ctrl_update,
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a7db7f040ae..9c74071c10e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -63,7 +63,7 @@ irq_trace(void)
*/
static __cacheline_aligned_in_smp unsigned long max_sequence;
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
*/
@@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly =
{
.func = irqsoff_tracer_call,
};
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
/*
* Should this new latency be reported/recorded?
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index fe4a252c236..3ae93f16b56 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock =
static void __wakeup_reset(struct trace_array *tr);
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
/*
* irqsoff uses its own tracer function to keep the overhead down:
*/
@@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly =
{
.func = wakeup_tracer_call,
};
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
/*
* Should this new latency be reported/recorded?
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 09cf230d7ec..90bc752a758 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
return ret;
}
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -99,13 +99,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* passed in by parameter to fool gcc from optimizing */
func();
- /* update the records */
- ret = ftrace_force_update();
- if (ret) {
- printk(KERN_CONT ".. ftraced failed .. ");
- return ret;
- }
-
/*
 * Some archs *cough*PowerPC*cough* add characters to the
* start of the function names. We simply put a '*' to
@@ -183,13 +176,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* make sure msleep has been recorded */
msleep(1);
- /* force the recorded functions to be traced */
- ret = ftrace_force_update();
- if (ret) {
- printk(KERN_CONT ".. ftraced failed .. ");
- return ret;
- }
-
/* start the tracing */
ftrace_enabled = 1;
tracer_enabled = 1;
@@ -226,7 +212,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
return ret;
}
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
int
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 74c5d9a3afa..be682b62fe5 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -44,6 +44,10 @@ static inline void check_stack(void)
if (this_size <= max_stack_size)
return;
+ /* we do not handle interrupt stacks yet */
+ if (!object_is_on_stack(&this_size))
+ return;
+
raw_local_irq_save(flags);
__raw_spin_lock(&max_stack_lock);
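
The new guard keeps check_stack() from recording while it runs on an interrupt stack, which the max-stack logic does not handle yet: object_is_on_stack() simply asks whether the address of a local variable lies within the current task's stack. Roughly (a sketch of the helper's era-appropriate form; the exact definition lives in include/linux/sched.h):

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}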