aboutsummaryrefslogtreecommitdiff
path: root/kernel/trace/trace_event_profile.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2010-03-05 05:35:37 +0100
committerFrederic Weisbecker <fweisbec@gmail.com>2010-03-10 14:47:18 +0100
commit97d5a22005f38057b4bc0d95f81cd26510268794 (patch)
treeb981789b1cec8ac36527e52204e407b32efa0ea6 /kernel/trace/trace_event_profile.c
parentc530665c31c0140b74ca7689e7f836177796e5bd (diff)
perf: Drop the obsolete profile naming for trace events
Drop the obsolete "profile" naming used by perf for trace events. Perf can now do more than simple events counting, so generalize the API naming. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Masami Hiramatsu <mhiramat@redhat.com> Cc: Jason Baron <jbaron@redhat.com>
Diffstat (limited to 'kernel/trace/trace_event_profile.c')
-rw-r--r--kernel/trace/trace_event_profile.c165
1 file changed, 0 insertions, 165 deletions
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
deleted file mode 100644
index e66d21e15a0..00000000000
--- a/kernel/trace/trace_event_profile.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * trace event based perf counter profiling
- *
- * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
- * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kprobes.h>
-#include "trace.h"
-
-DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
-
-static char *perf_trace_buf;
-static char *perf_trace_buf_nmi;
-
-typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
-
-/* Count the events in use (per event id, not per instance) */
-static int total_profile_count;
-
-static int ftrace_profile_enable_event(struct ftrace_event_call *event)
-{
- char *buf;
- int ret = -ENOMEM;
-
- if (event->profile_count++ > 0)
- return 0;
-
- if (!total_profile_count) {
- buf = (char *)alloc_percpu(perf_trace_t);
- if (!buf)
- goto fail_buf;
-
- rcu_assign_pointer(perf_trace_buf, buf);
-
- buf = (char *)alloc_percpu(perf_trace_t);
- if (!buf)
- goto fail_buf_nmi;
-
- rcu_assign_pointer(perf_trace_buf_nmi, buf);
- }
-
- ret = event->profile_enable(event);
- if (!ret) {
- total_profile_count++;
- return 0;
- }
-
-fail_buf_nmi:
- if (!total_profile_count) {
- free_percpu(perf_trace_buf_nmi);
- free_percpu(perf_trace_buf);
- perf_trace_buf_nmi = NULL;
- perf_trace_buf = NULL;
- }
-fail_buf:
- event->profile_count--;
-
- return ret;
-}
-
-int ftrace_profile_enable(int event_id)
-{
- struct ftrace_event_call *event;
- int ret = -EINVAL;
-
- mutex_lock(&event_mutex);
- list_for_each_entry(event, &ftrace_events, list) {
- if (event->id == event_id && event->profile_enable &&
- try_module_get(event->mod)) {
- ret = ftrace_profile_enable_event(event);
- break;
- }
- }
- mutex_unlock(&event_mutex);
-
- return ret;
-}
-
-static void ftrace_profile_disable_event(struct ftrace_event_call *event)
-{
- char *buf, *nmi_buf;
-
- if (--event->profile_count > 0)
- return;
-
- event->profile_disable(event);
-
- if (!--total_profile_count) {
- buf = perf_trace_buf;
- rcu_assign_pointer(perf_trace_buf, NULL);
-
- nmi_buf = perf_trace_buf_nmi;
- rcu_assign_pointer(perf_trace_buf_nmi, NULL);
-
- /*
- * Ensure every events in profiling have finished before
- * releasing the buffers
- */
- synchronize_sched();
-
- free_percpu(buf);
- free_percpu(nmi_buf);
- }
-}
-
-void ftrace_profile_disable(int event_id)
-{
- struct ftrace_event_call *event;
-
- mutex_lock(&event_mutex);
- list_for_each_entry(event, &ftrace_events, list) {
- if (event->id == event_id) {
- ftrace_profile_disable_event(event);
- module_put(event->mod);
- break;
- }
- }
- mutex_unlock(&event_mutex);
-}
-
-__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
- int *rctxp, unsigned long *irq_flags)
-{
- struct trace_entry *entry;
- char *trace_buf, *raw_data;
- int pc, cpu;
-
- pc = preempt_count();
-
- /* Protect the per cpu buffer, begin the rcu read side */
- local_irq_save(*irq_flags);
-
- *rctxp = perf_swevent_get_recursion_context();
- if (*rctxp < 0)
- goto err_recursion;
-
- cpu = smp_processor_id();
-
- if (in_nmi())
- trace_buf = rcu_dereference(perf_trace_buf_nmi);
- else
- trace_buf = rcu_dereference(perf_trace_buf);
-
- if (!trace_buf)
- goto err;
-
- raw_data = per_cpu_ptr(trace_buf, cpu);
-
- /* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
- entry = (struct trace_entry *)raw_data;
- tracing_generic_entry_update(entry, *irq_flags, pc);
- entry->type = type;
-
- return raw_data;
-err:
- perf_swevent_put_recursion_context(*rctxp);
-err_recursion:
- local_irq_restore(*irq_flags);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare);