| author | Ingo Molnar <mingo@elte.hu> | 2009-06-17 12:52:15 +0200 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-06-17 12:56:49 +0200 |
| commit | eadb8a091b27a840de7450f84ecff5ef13476424 (patch) | |
| tree | 58c3782d40def63baa8167f3d31e3048cb4c7660 /arch/arm/kernel/smp.c | |
| parent | 73874005cd8800440be4299bd095387fff4b90ac (diff) | |
| parent | 65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff) | |
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
arch/x86/Kconfig
arch/x86/kernel/traps.c
arch/x86/power/cpu.c
arch/x86/power/cpu_32.c
kernel/Makefile
Semantic conflict:
arch/x86/kernel/hw_breakpoint.c
Merge reason: Resolve the conflicts, move from put_cpu_no_sched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
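
The put_cpu_no_sched() to put_cpu() move noted above changes preemption semantics: put_cpu() re-enables preemption and permits an immediate reschedule, which the no-resched variant deliberately avoided. A minimal kernel-context sketch of the get_cpu()/put_cpu() pairing; the helper function is hypothetical, not from this merge:

```c
#include <linux/smp.h>

/* Hypothetical helper illustrating the get_cpu()/put_cpu() pairing. */
static void touch_percpu_state(void)
{
	int cpu = get_cpu();	/* preempt_disable() + smp_processor_id() */

	/* ... work on per-CPU data for 'cpu'; no migration can occur here ... */

	put_cpu();		/* preempt_enable(); may reschedule immediately */
}
```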
Diffstat (limited to 'arch/arm/kernel/smp.c')
| -rw-r--r-- | arch/arm/kernel/smp.c | 169 |
| --- | --- | --- |

1 file changed, 106 insertions(+), 63 deletions(-)
```diff
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7801aac3c04..de885fd256c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -22,16 +22,20 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/cputype.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/localtimer.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -163,7 +167,7 @@ int __cpuexit __cpu_disable(void)
 	 * Take this CPU offline.  Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -274,9 +278,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	local_fiq_enable();
 
 	/*
-	 * Setup local timer for this CPU.
+	 * Setup the percpu timer for this CPU.
 	 */
-	local_timer_setup();
+	percpu_timer_setup();
 
 	calibrate_delay();
 
@@ -285,7 +289,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * OK, now it's safe to let the boot CPU continue
 	 */
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -326,14 +330,14 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
 {
 	unsigned long flags;
 	unsigned int cpu;
 
 	local_irq_save(flags);
 
-	for_each_cpu_mask(cpu, callmap) {
+	for_each_cpu(cpu, mask) {
 		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 
 		spin_lock(&ipi->lock);
@@ -344,19 +348,19 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
 	/*
 	 * Call the platform specific cross-CPU call function.
 	 */
-	smp_cross_call(callmap);
+	smp_cross_call(mask);
 
 	local_irq_restore(flags);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 void show_ipi_list(struct seq_file *p)
@@ -383,10 +387,16 @@ void show_local_irqs(struct seq_file *p)
 	seq_putc(p, '\n');
 }
 
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
 static void ipi_timer(void)
 {
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
 	irq_enter();
-	local_timer_interrupt();
+	evt->event_handler(evt);
 	irq_exit();
 }
 
@@ -405,6 +415,42 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_TIMER);
+}
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+}
+
+static void local_timer_setup(struct clock_event_device *evt)
+{
+	evt->name	= "dummy_timer";
+	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
+			  CLOCK_EVT_FEAT_PERIODIC |
+			  CLOCK_EVT_FEAT_DUMMY;
+	evt->rating	= 400;
+	evt->mult	= 1;
+	evt->set_mode	= broadcast_timer_set_mode;
+	evt->broadcast	= smp_timer_broadcast;
+
+	clockevents_register_device(evt);
+}
+#endif
+
+void __cpuinit percpu_timer_setup(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->cpumask = cpumask_of(cpu);
+
+	local_timer_setup(evt);
+}
+
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -417,7 +463,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 	dump_stack();
 	spin_unlock(&stop_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
 	local_irq_disable();
@@ -498,26 +544,14 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
-}
-
-void smp_send_timer(void)
-{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_TIMER);
-}
-
-void smp_timer_broadcast(cpumask_t mask)
-{
-	send_ipi_message(mask, IPI_TIMER);
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
 {
 	cpumask_t mask = cpu_online_map;
 	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_CPU_STOP);
+	send_ipi_message(&mask, IPI_CPU_STOP);
 }
 
 /*
@@ -528,20 +562,17 @@ int setup_profiling_timer(unsigned int multiplier)
 	return -EINVAL;
 }
 
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
+static void
+on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+	const struct cpumask *mask)
 {
-	int ret = 0;
-
 	preempt_disable();
 
-	ret = smp_call_function_mask(mask, func, info, wait);
-	if (cpu_isset(smp_processor_id(), mask))
+	smp_call_function_many(mask, func, info, wait);
+	if (cpumask_test_cpu(smp_processor_id(), mask))
 		func(info);
 
 	preempt_enable();
-
-	return ret;
 }
 
 /**********************************************************************/
@@ -555,6 +586,12 @@ struct tlb_args {
 	unsigned long ta_end;
 };
 
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
 	local_flush_tlb_all();
@@ -597,55 +634,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t mask = mm->cpu_vm_mask;
-
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = kaddr;
-
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
                      unsigned long start, unsigned long end)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_range(vma, start, end);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
}
```
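
Most of the churn in this diff is the then-new cpumask API: masks are passed as `const struct cpumask *` instead of being copied by value as `cpumask_t`, so stack usage no longer grows with NR_CPUS. A kernel-context sketch of the before/after idioms; the demo function is hypothetical, not part of the commit:

```c
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical demo of the cpumask API migration seen throughout the diff. */
static void cpumask_api_demo(int cpu)
{
	/* New style: pointer to a static per-CPU mask, no by-value copy. */
	const struct cpumask *mask = cpumask_of(cpu);	/* was: cpumask_of_cpu(cpu) */
	unsigned int i, n = 0;

	for_each_cpu(i, mask)		/* was: for_each_cpu_mask(i, some_cpumask_t) */
		n++;

	pr_info("%u cpu(s) in mask\n", n);

	set_cpu_online(cpu, true);	/* was: cpu_set(cpu, cpu_online_map) */
}
```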
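The new tlb_ops_need_broadcast() test reads the maintenance-broadcast field of ARM's ID_MMFR3 register (bits [15:12]): 0 means maintenance operations are not broadcast in hardware, 1 covers cache and branch-predictor operations only, and 2 adds TLB operations, so only a value of 2 or more lets the kernel skip the cross-CPU flush IPIs. A standalone sketch of the same decode applied to raw register values (the example values are hypothetical):

```c
#include <stdio.h>

/* Same test as the kernel's tlb_ops_need_broadcast(), but applied to a
 * raw ID_MMFR3 value instead of reading the coprocessor register. */
static int tlb_ops_need_broadcast(unsigned int mmfr3)
{
	return ((mmfr3 >> 12) & 0xf) < 2;	/* field < 2: TLB ops not broadcast in hw */
}

int main(void)
{
	unsigned int mmfr3_cache_bp_only = 0x00001000;	/* field = 1: cache/BP only */
	unsigned int mmfr3_incl_tlb      = 0x00002000;	/* field = 2: incl. TLB ops */

	printf("field 1 -> IPI broadcast needed: %d\n",
	       tlb_ops_need_broadcast(mmfr3_cache_bp_only));	/* prints 1 */
	printf("field 2 -> IPI broadcast needed: %d\n",
	       tlb_ops_need_broadcast(mmfr3_incl_tlb));		/* prints 0 */
	return 0;
}
```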