author		Steven Rostedt <rostedt@goodmis.org>	2008-10-16 09:31:27 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-20 18:27:01 +0200
commit		bd95b88d9e51fcbf392a7e90338a8fcc3499cbd6
tree		a88df2bd756a4dd0715a92c3cec193366d86b861 /kernel/trace
parent		c513867561eeb07d24a0bdda1a18a8f91921a301
ftrace: release functions from hash
The x86 architecture uses a static recording of mcount caller locations and is not affected by this patch. For architectures still using the dynamic ftrace daemon, this patch is critical: it removes the race between the recording of a function that calls mcount, the unloading of a module, and the ftrace daemon updating the call sites.

This patch adds the releasing of the hash entries that the daemon uses to update the mcount call sites. When a module is unloaded, not only is the call-site table updated, but so now is the hash of recorded functions that the ftrace daemon will use. Again, architectures that implement MCOUNT_RECORD (currently only x86) are not affected by this.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
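To make the pattern concrete outside the kernel, below is a minimal user-space C sketch of what the patch does: a hash of recorded call sites, plus a release step that frees every record whose address falls inside an unloaded module's range. All names here (toy_rec, toy_release_range, the addresses) are illustrative stand-ins, not the kernel's API.

#include <stdio.h>
#include <stdlib.h>

#define TOY_HASHSIZE 16

struct toy_rec {
	unsigned long ip;	/* address of a recorded mcount call site */
	struct toy_rec *next;	/* singly linked hash chain */
};

static struct toy_rec *toy_hash[TOY_HASHSIZE];

static unsigned long toy_hash_fn(unsigned long ip)
{
	return ip % TOY_HASHSIZE;
}

static void toy_record(unsigned long ip)
{
	struct toy_rec *rec = malloc(sizeof(*rec));

	rec->ip = ip;
	rec->next = toy_hash[toy_hash_fn(ip)];
	toy_hash[toy_hash_fn(ip)] = rec;
}

/*
 * The step the patch adds: walk every bucket and free any record whose
 * ip falls inside the unloaded module's address range. Walking via a
 * pointer-to-link lets the current node be freed mid-iteration, the
 * same reason the kernel uses hlist_for_each_entry_safe().
 */
static void toy_release_range(unsigned long start, unsigned long end)
{
	for (int i = 0; i < TOY_HASHSIZE; i++) {
		struct toy_rec **link = &toy_hash[i];

		while (*link) {
			struct toy_rec *rec = *link;

			if (rec->ip >= start && rec->ip < end) {
				*link = rec->next;
				free(rec);
			} else {
				link = &rec->next;
			}
		}
	}
}

int main(void)
{
	toy_record(0x1000);	/* core kernel function */
	toy_record(0xa000);	/* function inside a "module" */
	toy_record(0xa040);	/* another one in the module */

	/* "module unload": drop everything in [0xa000, 0xb000) */
	toy_release_range(0xa000, 0xb000);

	for (int i = 0; i < TOY_HASHSIZE; i++)
		for (struct toy_rec *r = toy_hash[i]; r; r = r->next)
			printf("still recorded: 0x%lx\n", r->ip);
	return 0;
}

Without the release step, the stale 0xa000-range records would keep pointing into freed module text, which is exactly the race the daemon could lose.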
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	43
1 file changed, 43 insertions, 0 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4dda4f60a2a..1f54a94189f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -164,10 +164,14 @@ static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) \
	spin_unlock_irqrestore(&ftrace_hash_lock, flags)
+static void ftrace_release_hash(unsigned long start, unsigned long end);
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while(0)
+static inline void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+}
#endif
/*
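The hunk above relies on a standard kernel idiom: when the dynamic-daemon path is compiled out, ftrace_release_hash() shrinks to an empty static inline, so callers such as ftrace_release() need no #ifdef of their own. A minimal sketch of the idiom follows; CONFIG_HAS_SLOW_PATH and slow_path_cleanup are hypothetical names, not from the kernel.

#ifdef CONFIG_HAS_SLOW_PATH
/* real version, defined elsewhere */
void slow_path_cleanup(unsigned long start, unsigned long end);
#else
/* stub: compiles away to nothing, so call sites stay #ifdef-free */
static inline void slow_path_cleanup(unsigned long start, unsigned long end)
{
}
#endif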
@@ -347,6 +351,7 @@ void ftrace_release(void *start, unsigned long size)
}
spin_unlock(&ftrace_lock);
+	ftrace_release_hash(s, e);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -1659,6 +1664,44 @@ void __init ftrace_init(void)
ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
+
+static void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+	struct dyn_ftrace *rec;
+	struct hlist_node *t, *n;
+	struct hlist_head *head, temp_list;
+	unsigned long flags;
+	int i, cpu;
+
+	preempt_disable_notrace();
+
+	/* disable in case we call something that calls mcount */
+	cpu = raw_smp_processor_id();
+	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
+
+	ftrace_hash_lock(flags);
+
+	for (i = 0; i < FTRACE_HASHSIZE; i++) {
+		INIT_HLIST_HEAD(&temp_list);
+		head = &ftrace_hash[i];
+
+		/* all CPUs are stopped, we are safe to modify code */
+		hlist_for_each_entry_safe(rec, t, n, head, node) {
+			if (rec->flags & FTRACE_FL_FREE)
+				continue;
+
+			if ((rec->ip >= start) && (rec->ip < end))
+				ftrace_free_rec(rec);
+		}
+	}
+
+	ftrace_hash_unlock(flags);
+
+	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+	preempt_enable_notrace();
+
+}
+
static int ftraced(void *ignore)
{
unsigned long usecs;
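One detail worth calling out in the new ftrace_release_hash() above: it brackets its work with preempt_disable_notrace() and a per-CPU counter (ftrace_shutdown_disable_cpu), so that if anything it calls is itself compiled with mcount hooks, the handler can see the raised counter and bail out instead of recursing. Below is a user-space sketch of that re-entrancy guard using a thread-local flag; all names (hook_disabled, trace_hook, release_records) are hypothetical.

#include <stdio.h>

/*
 * Hypothetical re-entrancy guard: a per-thread counter the tracing
 * hook checks before running, mirroring how the patch raises
 * ftrace_shutdown_disable_cpu before touching the hash.
 */
static __thread int hook_disabled;

static void trace_hook(const char *what)
{
	if (hook_disabled)	/* guard raised: refuse to trace */
		return;
	printf("traced: %s\n", what);
}

static void release_records(void)
{
	hook_disabled++;	/* anything traced below becomes a no-op */
	trace_hook("freeing hash entries");	/* suppressed: no recursion */
	hook_disabled--;
}

int main(void)
{
	trace_hook("normal call");	/* prints */
	release_records();		/* hook stays quiet inside */
	return 0;
}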