Diffstat (limited to 'arch/sparc/kernel/smp_32.c')
-rw-r--r-- | arch/sparc/kernel/smp_32.c | 423
1 files changed, 423 insertions, 0 deletions
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
new file mode 100644
index 00000000000..e396c1f17a9
--- /dev/null
+++ b/arch/sparc/kernel/smp_32.c
@@ -0,0 +1,423 @@
+/* smp.c: Sparc SMP support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
+ */
+
+#include <asm/head.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/cache.h>
+#include <linux/delay.h>
+
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/cpudata.h>
+
+#include "irq.h"
+
+volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,};
+unsigned char boot_cpu_id = 0;
+unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
+
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t smp_commenced_mask = CPU_MASK_NONE;
+
+/* The only guaranteed locking primitive available on all Sparc
+ * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
+ * places the current byte at the effective address into dest_reg and
+ * places 0xff there afterwards. Pretty lame locking primitive
+ * compared to the Alpha and the Intel no? Most Sparcs have 'swap'
+ * instruction which is much better...
+ */
+
+void __cpuinit smp_store_cpu_info(int id)
+{
+	int cpu_node;
+
+	cpu_data(id).udelay_val = loops_per_jiffy;
+
+	cpu_find_by_mid(id, &cpu_node);
+	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
+						     "clock-frequency", 0);
+	cpu_data(id).prom_node = cpu_node;
+	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+
+	if (cpu_data(id).mid < 0)
+		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	extern void smp4m_smp_done(void);
+	extern void smp4d_smp_done(void);
+	unsigned long bogosum = 0;
+	int cpu, num;
+
+	for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
+		if (cpu_online(cpu)) {
+			num++;
+			bogosum += cpu_data(cpu).udelay_val;
+		}
+
+	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+		num, bogosum/(500000/HZ),
+		(bogosum/(5000/HZ))%100);
+
+	switch(sparc_cpu_model) {
+	case sun4:
+		printk("SUN4\n");
+		BUG();
+		break;
+	case sun4c:
+		printk("SUN4C\n");
+		BUG();
+		break;
+	case sun4m:
+		smp4m_smp_done();
+		break;
+	case sun4d:
+		smp4d_smp_done();
+		break;
+	case sun4e:
+		printk("SUN4E\n");
+		BUG();
+		break;
+	case sun4u:
+		printk("SUN4U\n");
+		BUG();
+		break;
+	default:
+		printk("UNKNOWN!\n");
+		BUG();
+		break;
+	};
+}
+
+void cpu_panic(void)
+{
+	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
+	panic("SMP bolixed\n");
+}
+
+struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 };
+
+void smp_send_reschedule(int cpu)
+{
+	/* See sparc64 */
+}
+
+void smp_send_stop(void)
+{
+}
+
+void smp_flush_cache_all(void)
+{
+	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
+	local_flush_cache_all();
+}
+
+void smp_flush_tlb_all(void)
+{
+	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
+	local_flush_tlb_all();
+}
+
+void smp_flush_cache_mm(struct mm_struct *mm)
+{
+	if(mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpu_clear(smp_processor_id(), cpu_mask);
+		if (!cpus_empty(cpu_mask))
+			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
+		local_flush_cache_mm(mm);
+	}
+}
+
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+	if(mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpu_clear(smp_processor_id(), cpu_mask);
+		if (!cpus_empty(cpu_mask)) {
+			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
+			if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+		}
+		local_flush_tlb_mm(mm);
+	}
+}
+
+void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+			   unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpu_clear(smp_processor_id(), cpu_mask);
+		if (!cpus_empty(cpu_mask))
+			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
+		local_flush_cache_range(vma, start, end);
+	}
+}
+
+void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			 unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpu_clear(smp_processor_id(), cpu_mask);
+		if (!cpus_empty(cpu_mask))
+			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
+		local_flush_tlb_range(vma, start, end);
+	}
+}
+
+void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if(mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpu_clear(smp_processor_id(), cpu_mask);
+		if (!cpus_empty(cpu_mask))
+			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
+		local_flush_cache_page(vma, page);
+	}
+}
+
+void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if(mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask = mm->cpu_vm_mask;
+		cpu_clear(smp_processor_id(), cpu_mask);
+		if (!cpus_empty(cpu_mask))
+			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
+		local_flush_tlb_page(vma, page);
+	}
+}
+
+void smp_reschedule_irq(void)
+{
+	set_need_resched();
+}
+
+void smp_flush_page_to_ram(unsigned long page)
+{
+	/* Current theory is that those who call this are the one's
+	 * who have just dirtied their cache with the pages contents
+	 * in kernel space, therefore we only run this on local cpu.
+	 *
+	 * XXX This experiment failed, research further... -DaveM
+	 */
+#if 1
+	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
+#endif
+	local_flush_page_to_ram(page);
+}
+
+void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+	cpumask_t cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask))
+		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
+	local_flush_sig_insns(mm, insn_addr);
+}
+
+extern unsigned int lvl14_resolution;
+
+/* /proc/profile writes can call this, don't __init it please. */
+static DEFINE_SPINLOCK(prof_setup_lock);
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+	int i;
+	unsigned long flags;
+
+	/* Prevent level14 ticker IRQ flooding. */
+	if((!multiplier) || (lvl14_resolution / multiplier) < 500)
+		return -EINVAL;
+
+	spin_lock_irqsave(&prof_setup_lock, flags);
+	for_each_possible_cpu(i) {
+		load_profile_irq(i, lvl14_resolution / multiplier);
+		prof_multiplier(i) = multiplier;
+	}
+	spin_unlock_irqrestore(&prof_setup_lock, flags);
+
+	return 0;
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	extern void __init smp4m_boot_cpus(void);
+	extern void __init smp4d_boot_cpus(void);
+	int i, cpuid, extra;
+
+	printk("Entering SMP Mode...\n");
+
+	extra = 0;
+	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
+		if (cpuid >= NR_CPUS)
+			extra++;
+	}
+	/* i = number of cpus */
+	if (extra && max_cpus > i - extra)
+		printk("Warning: NR_CPUS is too low to start all cpus\n");
+
+	smp_store_cpu_info(boot_cpu_id);
+
+	switch(sparc_cpu_model) {
+	case sun4:
+		printk("SUN4\n");
+		BUG();
+		break;
+	case sun4c:
+		printk("SUN4C\n");
+		BUG();
+		break;
+	case sun4m:
+		smp4m_boot_cpus();
+		break;
+	case sun4d:
+		smp4d_boot_cpus();
+		break;
+	case sun4e:
+		printk("SUN4E\n");
+		BUG();
+		break;
+	case sun4u:
+		printk("SUN4U\n");
+		BUG();
+		break;
+	default:
+		printk("UNKNOWN!\n");
+		BUG();
+		break;
+	};
+}
+
+/* Set this up early so that things like the scheduler can init
+ * properly. We use the same cpu mask for both the present and
+ * possible cpu map.
+ */
+void __init smp_setup_cpu_possible_map(void)
+{
+	int instance, mid;
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, NULL, &mid)) {
+		if (mid < NR_CPUS) {
+			cpu_set(mid, phys_cpu_present_map);
+			cpu_set(mid, cpu_present_map);
+		}
+		instance++;
+	}
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+	int cpuid = hard_smp_processor_id();
+
+	if (cpuid >= NR_CPUS) {
+		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+		prom_halt();
+	}
+	if (cpuid != 0)
+		printk("boot cpu id != 0, this could work but is untested\n");
+
+	current_thread_info()->cpu = cpuid;
+	cpu_set(cpuid, cpu_online_map);
+	cpu_set(cpuid, phys_cpu_present_map);
+}
+
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+	extern int __cpuinit smp4m_boot_one_cpu(int);
+	extern int __cpuinit smp4d_boot_one_cpu(int);
+	int ret=0;
+
+	switch(sparc_cpu_model) {
+	case sun4:
+		printk("SUN4\n");
+		BUG();
+		break;
+	case sun4c:
+		printk("SUN4C\n");
+		BUG();
+		break;
+	case sun4m:
+		ret = smp4m_boot_one_cpu(cpu);
+		break;
+	case sun4d:
+		ret = smp4d_boot_one_cpu(cpu);
+		break;
+	case sun4e:
+		printk("SUN4E\n");
+		BUG();
+		break;
+	case sun4u:
+		printk("SUN4U\n");
+		BUG();
+		break;
+	default:
+		printk("UNKNOWN!\n");
+		BUG();
+		break;
+	};
+
+	if (!ret) {
+		cpu_set(cpu, smp_commenced_mask);
+		while (!cpu_online(cpu))
+			mb();
+	}
+	return ret;
+}
+
+void smp_bogo(struct seq_file *m)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n",
+			   i,
+			   cpu_data(i).udelay_val/(500000/HZ),
+			   (cpu_data(i).udelay_val/(5000/HZ))%100);
+	}
+}
+
+void smp_info(struct seq_file *m)
+{
+	int i;
+
+	seq_printf(m, "State:\n");
+	for_each_online_cpu(i)
+		seq_printf(m, "CPU%d\t\t: online\n", i);
+}
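
For reference, the header comment in the new file describes ldstub, the only atomic primitive guaranteed on every sparc32 processor: it atomically loads a byte and stores 0xff back to it. The following is a minimal, hypothetical sketch of the test-and-set byte lock that primitive yields, not part of the patch above; it assumes a sparc32 toolchain, and the names demo_lock_t, demo_lock() and demo_unlock() are invented for illustration (the kernel's real lock lives in the arch spinlock code and adds a more careful spin loop).

/* Illustrative sketch only -- not part of the patch above. */
typedef struct { unsigned char lock; } demo_lock_t;	/* hypothetical type */

static inline void demo_lock(demo_lock_t *l)
{
	unsigned char prev;

	do {
		/* ldstub atomically loads the byte at the lock address
		 * into 'prev' and stores 0xff back to that byte.
		 */
		__asm__ __volatile__("ldstub [%1], %0"
				     : "=r" (prev)
				     : "r" (&l->lock)
				     : "memory");
	} while (prev);	/* non-zero means another CPU already holds it */
}

static inline void demo_unlock(demo_lock_t *l)
{
	/* Store the always-zero %g0 register to release the lock. */
	__asm__ __volatile__("stb %%g0, [%0]"
			     : /* no outputs */
			     : "r" (&l->lock)
			     : "memory");
}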