author     David S. Miller <davem@davemloft.net>	2008-09-03 02:14:39 -0700
committer  David S. Miller <davem@davemloft.net>	2008-09-03 02:14:39 -0700
commit     e5bd1c3fdd06b6c0fa6dfb98ce31cea1820ce4e9 (patch)
tree       270c78d5714cfb84e5e99cd8b341d42a7720ecca
parent     dbb8c35d9063fe233626865cc836fbc102fa083b (diff)
sparc64: Fix IPI call locking.
When I switched sparc64 over to the generic helpers for
smp_call_function(), I didn't convert the dinky call_lock
we had.

Use ipi_call_lock() and ipi_call_unlock().

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	arch/sparc64/kernel/smp.c	10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 743ccad61c6..0712a445f98 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -80,8 +80,6 @@ void smp_bogo(struct seq_file *m)
i, cpu_data(i).clock_tick);
}
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0;
@@ -120,9 +118,9 @@ void __cpuinit smp_callin(void)
while (!cpu_isset(cpuid, smp_commenced_mask))
rmb();
- spin_lock(&call_lock);
+ ipi_call_lock();
cpu_set(cpuid, cpu_online_map);
- spin_unlock(&call_lock);
+ ipi_call_unlock();
/* idle thread is expected to have preempt disabled */
preempt_disable();
@@ -1305,9 +1303,9 @@ int __cpu_disable(void)
c->core_id = 0;
c->proc_id = -1;
- spin_lock(&call_lock);
+ ipi_call_lock();
cpu_clear(cpu, cpu_online_map);
- spin_unlock(&call_lock);
+ ipi_call_unlock();
smp_wmb();
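
For context: ipi_call_lock()/ipi_call_unlock() come from the generic smp_call_function() helpers in kernel/smp.c and expose the spinlock guarding the pending cross-call work there. Holding that same lock while cpu_online_map is updated keeps CPU online/offline transitions serialized against new cross-calls, which the private sparc64 call_lock no longer guaranteed once the generic helpers took over. A minimal sketch of the helper side follows; the lock name and layout are assumptions for illustration, not copied from the kernel tree of this era.

/* Sketch (assumed names): the generic helpers keep pending smp_call_function()
 * work guarded by one spinlock.  ipi_call_lock()/ipi_call_unlock() expose that
 * lock so an architecture can hold it across cpu_online_map updates.
 */
static DEFINE_SPINLOCK(call_function_lock);	/* assumed name */

void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}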