From ca109491f612aab5c8152207631c0444f63da97f Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 25 Nov 2008 12:43:51 +0100
Subject: hrtimer: removing all ur callback modes

Impact: cleanup, move all hrtimer processing into hardirq context

This is an attempt at removing some of the hrtimer complexity by
reducing the number of callback modes to 1.

This means that all hrtimer callback functions will be run from
HARD-irq context.

I went through all the 30-odd hrtimer callback functions in the kernel
and saw only one that I'm not quite sure of, which is the one in
net/can/bcm.c - hence I'm CC-ing the folks responsible for that code.

Furthermore, the hrtimer core now calls callbacks directly with IRQs
disabled in case you try to enqueue an expired timer. If this timer is
a periodic timer (which should use hrtimer_forward() to advance its
time) then it might be possible to end up in an infinite recursive loop
due to the fact that hrtimer_forward() doesn't round up to the next
timer granularity, and therefore keeps on calling the callback -
obviously this needs a fix.

Aside from that, this seems to compile and actually boot on my dual
core test box - although I'm sure there are some bugs in it; me not
hitting any makes me certain :-)

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_sysprof.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 9587d3bcba5..ae542e2e38d 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = stack_trace_timer_fn;
-	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
 }
--
cgit v1.2.3


From 68814b58c52077da9561b544089fe532a0842f71 Mon Sep 17 00:00:00 2001
From: Robert Richter
Date: Mon, 24 Nov 2008 12:24:12 +0100
Subject: ring_buffer: update description for ring_buffer_alloc()

Trivial patch.

Cc: Steven Rostedt
Signed-off-by: Robert Richter
---
 kernel/trace/ring_buffer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5ef2b..c8996d239e4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -381,7 +381,7 @@ extern int ring_buffer_page_too_big(void);
 /**
  * ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
  * @flags: attributes to set for the ring buffer.
  *
  * Currently the only flag that is available is the RB_FL_OVERWRITE
--
cgit v1.2.3


From e2ac8ef576e45d9db7264abc51383e68d26067bb Mon Sep 17 00:00:00 2001
From: Robert Richter
Date: Wed, 12 Nov 2008 12:59:32 +0100
Subject: ftrace: remove unused function arg in trace_iterator_increment()

This removes the unused cpu function parameter.
Cc: Steven Rostedt
Signed-off-by: Robert Richter
---
 kernel/trace/trace.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d86e3252f30..a96b335fe75 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -914,7 +914,7 @@ enum trace_file_type {
 	TRACE_FILE_LAT_FMT	= 1,
 };
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
@@ -993,7 +993,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
 	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
 	if (iter->ent)
-		trace_iterator_increment(iter, iter->cpu);
+		trace_iterator_increment(iter);
 	return iter->ent ? iter : NULL;
 }
--
cgit v1.2.3


From c4f50183f90fb1fd99aa5941f01b90cd1b882d2e Mon Sep 17 00:00:00 2001
From: Robert Richter
Date: Thu, 11 Dec 2008 16:49:22 +0100
Subject: ring_buffer: adding EXPORT_SYMBOLs

I added EXPORT_SYMBOL_GPLs for all functions that are part of the API
(ring_buffer.h). This is required since oprofile uses the ring buffer
and compiling it as a module would fail otherwise.

Signed-off-by: Robert Richter
Signed-off-by: Ingo Molnar
---
 kernel/trace/ring_buffer.c | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

(limited to 'kernel/trace')

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c8996d239e4..30d57dd01a8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -31,6 +31,7 @@ void tracing_on(void)
 {
 	ring_buffers_off = 0;
 }
+EXPORT_SYMBOL_GPL(tracing_on);
 /**
  * tracing_off - turn off all tracing buffers
@@ -44,6 +45,7 @@ void tracing_off(void)
 {
 	ring_buffers_off = 1;
 }
+EXPORT_SYMBOL_GPL(tracing_off);
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu)
 	return time;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
 {
 	/* Just stupid testing the normalize function and deltas */
 	*ts >>= DEBUG_SHIFT;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
 #define RB_ALIGNMENT_SHIFT	2
@@ -115,6 +119,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 {
 	return rb_event_length(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 /* inline for ring buffer fast paths */
 static inline void *
@@ -136,6 +141,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
 {
 	return rb_event_data(event);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 #define for_each_buffer_cpu(buffer, cpu) \
 	for_each_cpu_mask(cpu, buffer->cpumask)
@@ -444,6 +450,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
 	kfree(buffer);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
 /**
  * ring_buffer_free - free a ring buffer.
@@ -459,6 +466,7 @@ ring_buffer_free(struct ring_buffer *buffer)
 	kfree(buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_free);
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
@@ -620,6 +628,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	mutex_unlock(&buffer->mutex);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
 static inline int rb_null_event(struct ring_buffer_event *event)
 {
@@ -1220,6 +1229,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
 	preempt_enable_notrace();
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
@@ -1269,6 +1279,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 /**
  * ring_buffer_write - write data to the buffer without reserving
@@ -1334,6 +1345,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_write);
 static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 {
@@ -1360,6 +1372,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
 	atomic_inc(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 /**
  * ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1385,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
 	atomic_dec(&buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1407,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_inc(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 /**
  * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1427,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	atomic_dec(&cpu_buffer->record_disabled);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1444,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1461,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return cpu_buffer->overrun;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 /**
  * ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1484,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 	return entries;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
 /**
  * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1488,6 +1507,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 	return overruns;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
 /**
  * ring_buffer_iter_reset - reset an iterator
@@ -1513,6 +1533,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
 	else
 		iter->read_stamp = iter->head_page->time_stamp;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 /**
  * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1527,6 +1548,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 	return iter->head_page == cpu_buffer->commit_page && iter->head ==
		rb_commit_index(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
 static void
 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1797,6 +1819,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
 /**
  * ring_buffer_iter_peek - peek at the next event to be read
@@ -1867,6 +1890,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 /**
  * ring_buffer_consume - return an event and consume it
@@ -1894,6 +1918,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
 /**
  * ring_buffer_read_start - start a non consuming read of the buffer
@@ -1934,6 +1959,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	return iter;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
 /**
  * ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +1976,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	atomic_dec(&cpu_buffer->record_disabled);
 	kfree(iter);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
 /**
  * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1971,6 +1998,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	return event;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_read);
 /**
  * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2008,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
 {
 	return BUF_PAGE_SIZE * buffer->pages;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_size);
 static void
 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2022,6 +2051,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	spin_unlock_irqrestore(&cpu_buffer->lock, flags);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 /**
  * ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2064,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
 	for_each_buffer_cpu(buffer, cpu)
 		ring_buffer_reset_cpu(buffer, cpu);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
 /**
  * rind_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2083,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
 /**
  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2100,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 	cpu_buffer = buffer->buffers[cpu];
 	return rb_per_cpu_empty(cpu_buffer);
 }
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 /**
  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2117,6 +2150,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
--
cgit v1.2.3


From 1af237a099a3b8ff56aa384f605c6a68af7bf288 Mon Sep 17 00:00:00 2001
From: Huang Weiyi
Date: Tue, 30 Dec 2008 06:41:44 +0800
Subject: tracing: removed duplicated #include

Removed duplicated #include in kernel/trace/trace.c.

Signed-off-by: Huang Weiyi
Signed-off-by: Linus Torvalds
---
 kernel/trace/trace.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3608f6cb2f7..4185d522163 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
--
cgit v1.2.3
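
The EXPORT_SYMBOL_GPL series above exists so that modular code such as oprofile
can link against the ring buffer API. As a rough sketch of what that enables -
not part of any patch above - the following minimal module exercises a few of
the newly exported symbols. The "rb_demo" names, the buffer size and the payload
struct are invented for illustration, and the ring_buffer_write() parameter list
(length, data) is assumed from the 2.6.28-era ring_buffer.h; check it against
the tree you actually build for.

/*
 * Illustrative sketch only, not part of the patch series above.
 * Assumes the 2.6.28-era linux/ring_buffer.h API; "rb_demo" and the
 * payload struct are made-up names for this example.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/ring_buffer.h>

struct rb_demo_sample {			/* invented payload */
	u64 value;
};

static struct ring_buffer *rb_demo_buffer;

static int __init rb_demo_init(void)
{
	struct rb_demo_sample sample = { .value = 42 };
	struct ring_buffer_event *event;
	u64 ts;
	int cpu;

	/* @size is per cpu, as the updated kerneldoc above notes */
	rb_demo_buffer = ring_buffer_alloc(16 * 1024, RB_FL_OVERWRITE);
	if (!rb_demo_buffer)
		return -ENOMEM;

	/* stay on one cpu so the write and the consume hit the same buffer */
	cpu = get_cpu();
	ring_buffer_write(rb_demo_buffer, sizeof(sample), &sample);
	event = ring_buffer_consume(rb_demo_buffer, cpu, &ts);
	put_cpu();

	if (event) {
		struct rb_demo_sample *s = ring_buffer_event_data(event);

		printk(KERN_INFO "rb_demo: value %llu at %llu\n",
		       (unsigned long long)s->value,
		       (unsigned long long)ts);
	}
	return 0;
}

static void __exit rb_demo_exit(void)
{
	ring_buffer_free(rb_demo_buffer);
}

module_init(rb_demo_init);
module_exit(rb_demo_exit);
MODULE_LICENSE("GPL");		/* the exports above are EXPORT_SYMBOL_GPL */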