author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-10-09 10:12:41 +0200
committer Ingo Molnar <mingo@elte.hu>              2009-10-09 15:56:44 +0200
commit    3365e7798760dc6c190a9bbb0945a38f02625438 (patch)
tree      c3c1b49dcf4c2fcbddf1af27c281c9a7a55219ad /kernel
parent    36a07902c2134649c4af7f07980413ffb1a56085 (diff)
lockdep: Use cpu_clock() for lockstat
Some tracepoint magic (TRACE_EVENT(lock_acquired)) relies on the fact
that lock hold times are positive and uses div64 on that. That triggered
a build warning on MIPS, and probably causes bad output in certain
circumstances as well.

Make it truly positive.

Reported-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1254818502.21044.112.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
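[Editor's note: a rough user-space sketch, not part of the patch, of why the
hold time must be genuinely positive. div64-style helpers divide unsigned
64-bit values, so a signed delta that goes negative (for instance when the
stamp was taken on another CPU whose sched_clock() runs ahead) is first
reinterpreted as an enormous u64. The names and values below are purely
illustrative.]

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Stamp taken on a CPU whose clock runs ahead of ours. */
		int64_t stamp = 5000, now = 4000;
		int64_t delta = now - stamp;	/* -1000: a "negative" hold time */

		/*
		 * A div64-style helper operates on u64, so the negative
		 * delta is reinterpreted as a huge unsigned value before
		 * the divide.
		 */
		uint64_t as_u64 = (uint64_t)delta;

		printf("signed   delta / 1000 = %lld\n",
		       (long long)(delta / 1000));
		printf("unsigned delta / 1000 = %llu\n",
		       (unsigned long long)(as_u64 / 1000));
		return 0;
	}

The second printf shows a value near 2^64 / 1000 rather than -1, which is
the kind of garbage output the commit message alludes to. Taking both stamps
from cpu_clock() on the local CPU, and carrying the delta as u64 throughout,
keeps the value truly positive instead of merely casting the sign away.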
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b..9af56723c09 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
 	if (hlock->instance != lock)
 		return;
 
-	hlock->waittime_stamp = sched_clock();
+	hlock->waittime_stamp = lockstat_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}