 arch/ia64/kernel/head.S    | 280
 arch/ia64/kernel/mca_asm.S |  88
 arch/ia64/kernel/process.c |  22
 arch/ia64/kernel/smpboot.c |  81
 include/asm-ia64/sal.h     |  38
 5 files changed, 399 insertions(+), 110 deletions(-)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 105c7fec8c6..0d535d65eea 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -15,6 +15,8 @@
  * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
  * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com>
  *	-Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2.
+ * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com>
+ *	Support for CPU Hotplug
  */
 
 #include <linux/config.h>
@@ -29,6 +31,134 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
+#include <asm/mca_asm.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define SAL_PSR_BITS_TO_SET				\
+	(IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL)
+
+#define SAVE_FROM_REG(src, ptr, dest)			\
+	mov dest=src;;					\
+	st8 [ptr]=dest,0x08
+
+#define RESTORE_REG(reg, ptr, _tmp)			\
+	ld8 _tmp=[ptr],0x08;;				\
+	mov reg=_tmp
+
+#define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)	\
+	mov ar.lc=IA64_NUM_DBG_REGS-1;;			\
+	mov _idx=0;;					\
+1:							\
+	SAVE_FROM_REG(_breg[_idx], ptr, _dest);;	\
+	add _idx=1,_idx;;				\
+	br.cloop.sptk.many 1b
+
+#define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\
+	mov ar.lc=IA64_NUM_DBG_REGS-1;;			\
+	mov _idx=0;;					\
+_lbl:	RESTORE_REG(_breg[_idx], ptr, _tmp);;		\
+	add _idx=1, _idx;;				\
+	br.cloop.sptk.many _lbl
+
+#define SAVE_ONE_RR(num, _reg, _tmp)			\
+	movl _tmp=(num<<61);;				\
+	mov _reg=rr[_tmp]
+
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+	SAVE_ONE_RR(0,_r0, _tmp);;			\
+	SAVE_ONE_RR(1,_r1, _tmp);;			\
+	SAVE_ONE_RR(2,_r2, _tmp);;			\
+	SAVE_ONE_RR(3,_r3, _tmp);;			\
+	SAVE_ONE_RR(4,_r4, _tmp);;			\
+	SAVE_ONE_RR(5,_r5, _tmp);;			\
+	SAVE_ONE_RR(6,_r6, _tmp);;			\
+	SAVE_ONE_RR(7,_r7, _tmp);;
+
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \
+	st8 [ptr]=_r0, 8;;				\
+	st8 [ptr]=_r1, 8;;				\
+	st8 [ptr]=_r2, 8;;				\
+	st8 [ptr]=_r3, 8;;				\
+	st8 [ptr]=_r4, 8;;				\
+	st8 [ptr]=_r5, 8;;				\
+	st8 [ptr]=_r6, 8;;				\
+	st8 [ptr]=_r7, 8;;
+
+#define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp)	\
+	mov ar.lc=0x08-1;;				\
+	movl _idx1=0x00;;				\
+RestRR:							\
+	dep.z _idx2=_idx1,61,3;;			\
+	ld8 _tmp=[ptr],8;;				\
+	mov rr[_idx2]=_tmp;;				\
+	srlz.d;;					\
+	add _idx1=1,_idx1;;				\
+	br.cloop.sptk.few RestRR
+
+/*
+ * The region registers are saved separately (see SAVE_REGION_REGS
+ * above) before this macro runs; it saves the branch regs and the
+ * rest of the state that needs to be preserved.
+ */
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred)	\
+	SAVE_FROM_REG(b0,_reg1,_reg2);;			\
+	SAVE_FROM_REG(b1,_reg1,_reg2);;			\
+	SAVE_FROM_REG(b2,_reg1,_reg2);;			\
+	SAVE_FROM_REG(b3,_reg1,_reg2);;			\
+	SAVE_FROM_REG(b4,_reg1,_reg2);;			\
+	SAVE_FROM_REG(b5,_reg1,_reg2);;			\
+	st8 [_reg1]=r1,0x08;;				\
+	st8 [_reg1]=r12,0x08;;				\
+	st8 [_reg1]=r13,0x08;;				\
+	SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);;		\
+	SAVE_FROM_REG(ar.pfs,_reg1,_reg2);;		\
+	SAVE_FROM_REG(ar.rnat,_reg1,_reg2);;		\
+	SAVE_FROM_REG(ar.unat,_reg1,_reg2);;		\
+	SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);;	\
+	SAVE_FROM_REG(cr.dcr,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.iva,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.pta,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.itv,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.pmv,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);;		\
+	SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);;		\
+	st8 [_reg1]=r4,0x08;;				\
+	st8 [_reg1]=r5,0x08;;				\
+	st8 [_reg1]=r6,0x08;;				\
+	st8 [_reg1]=r7,0x08;;				\
+	st8 [_reg1]=_pred,0x08;;			\
+	SAVE_FROM_REG(ar.lc, _reg1, _reg2);;		\
+	stf.spill.nta [_reg1]=f2,16;;			\
+	stf.spill.nta [_reg1]=f3,16;;			\
+	stf.spill.nta [_reg1]=f4,16;;			\
+	stf.spill.nta [_reg1]=f5,16;;			\
+	stf.spill.nta [_reg1]=f16,16;;			\
+	stf.spill.nta [_reg1]=f17,16;;			\
+	stf.spill.nta [_reg1]=f18,16;;			\
+	stf.spill.nta [_reg1]=f19,16;;			\
+	stf.spill.nta [_reg1]=f20,16;;			\
+	stf.spill.nta [_reg1]=f21,16;;			\
+	stf.spill.nta [_reg1]=f22,16;;			\
+	stf.spill.nta [_reg1]=f23,16;;			\
+	stf.spill.nta [_reg1]=f24,16;;			\
+	stf.spill.nta [_reg1]=f25,16;;			\
+	stf.spill.nta [_reg1]=f26,16;;			\
+	stf.spill.nta [_reg1]=f27,16;;			\
+	stf.spill.nta [_reg1]=f28,16;;			\
+	stf.spill.nta [_reg1]=f29,16;;			\
+	stf.spill.nta [_reg1]=f30,16;;			\
+	stf.spill.nta [_reg1]=f31,16;;
+
+#else
+#define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2,a3)
+#define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7)
+#endif
+
+#define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \
+	movl _tmp1=(num << 61);;	\
+	mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \
+	mov rr[_tmp1]=_tmp2
 
 	.section __special_page_section,"ax"
 
@@ -64,6 +194,12 @@ start_ap:
 	srlz.i
 	;;
 	/*
+	 * Save the region registers and the predicates before they get clobbered
+	 */
+	SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15);
+	mov r25=pr;;
+
+	/*
 	 * Initialize kernel region registers:
 	 *	rr[0]: VHPT enabled, page size = PAGE_SHIFT
 	 *	rr[1]: VHPT enabled, page size = PAGE_SHIFT
@@ -76,32 +212,14 @@ start_ap:
 	 * We initialize all of them to prevent inadvertently assuming
 	 * something about the state of address translation early in boot.
 	 */
-	mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r7=(0<<61)
-	mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r9=(1<<61)
-	mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r11=(2<<61)
-	mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r13=(3<<61)
-	mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r15=(4<<61)
-	mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
-	movl r17=(5<<61)
-	mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-	movl r19=(6<<61)
-	mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
-	movl r21=(7<<61)
-	;;
-	mov rr[r7]=r6
-	mov rr[r9]=r8
-	mov rr[r11]=r10
-	mov rr[r13]=r12
-	mov rr[r15]=r14
-	mov rr[r17]=r16
-	mov rr[r19]=r18
-	mov rr[r21]=r20
-	;;
+	SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);;
+	SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);;
+	SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);;
 	/*
 	 * Now pin mappings into the TLB for kernel text and data
 	 */
@@ -142,6 +260,13 @@ start_ap:
 	;;
 1:	// now we are in virtual mode
 
+	movl r2=sal_state_for_booting_cpu;;
+	ld8 r16=[r2];;
+
+	STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15);
+	SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25)
+	;;
+
 	// set IVT entry point---can't access I/O ports without it
 	movl r3=ia64_ivt
 	;;
@@ -211,12 +336,13 @@ start_ap:
 	mov IA64_KR(CURRENT_STACK)=r16
 	mov r13=r2
 	/*
-	 * Reserve space at the top of the stack for "struct pt_regs". Kernel threads
-	 * don't store interesting values in that structure, but the space still needs
-	 * to be there because time-critical stuff such as the context switching can
-	 * be implemented more efficiently (for example, __switch_to()
+	 * Reserve space at the top of the stack for "struct pt_regs". Kernel
+	 * threads don't store interesting values in that structure, but the space
+	 * still needs to be there because time-critical stuff such as the context
+	 * switching can be implemented more efficiently (for example, __switch_to()
	 * always sets the psr.dfh bit of the task it is switching to).
 	 */
+	addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2
 	addl r2=IA64_RBS_OFFSET,r2	// initialize the RSE
 	mov ar.rsc=0		// place RSE in enforced lazy mode
@@ -993,4 +1119,98 @@ END(ia64_spinlock_contention)
 
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+GLOBAL_ENTRY(ia64_jump_to_sal)
+	alloc r16=ar.pfs,1,0,0,0;;
+	rsm psr.i | psr.ic
+{
+	flushrs
+	srlz.i
+}
+	tpa r25=in0
+	movl r18=tlb_purge_done;;
+	DATA_VA_TO_PA(r18);;
+	mov b1=r18	// Return location
+	movl r18=ia64_do_tlb_purge;;
+	DATA_VA_TO_PA(r18);;
+	mov b2=r18	// doing tlb_flush work
+	mov ar.rsc=0	// Put RSE in enforced lazy, LE mode
+	movl r17=1f;;
+	DATA_VA_TO_PA(r17);;
+	mov cr.iip=r17
+	movl r16=SAL_PSR_BITS_TO_SET;;
+	mov cr.ipsr=r16
+	mov cr.ifs=r0;;
+	rfi;;
+1:
+	/*
+	 * Invalidate all TLB data/inst
+	 */
+	br.sptk.many b2;;	// jump to tlb purge code
+
+tlb_purge_done:
+	RESTORE_REGION_REGS(r25, r17,r18,r19);;
+	RESTORE_REG(b0, r25, r17);;
+	RESTORE_REG(b1, r25, r17);;
+	RESTORE_REG(b2, r25, r17);;
+	RESTORE_REG(b3, r25, r17);;
+	RESTORE_REG(b4, r25, r17);;
+	RESTORE_REG(b5, r25, r17);;
+	ld8 r1=[r25],0x08;;
+	ld8 r12=[r25],0x08;;
+	ld8 r13=[r25],0x08;;
+	RESTORE_REG(ar.fpsr, r25, r17);;
+	RESTORE_REG(ar.pfs, r25, r17);;
+	RESTORE_REG(ar.rnat, r25, r17);;
+	RESTORE_REG(ar.unat, r25, r17);;
+	RESTORE_REG(ar.bspstore, r25, r17);;
+	RESTORE_REG(cr.dcr, r25, r17);;
+	RESTORE_REG(cr.iva, r25, r17);;
+	RESTORE_REG(cr.pta, r25, r17);;
+	RESTORE_REG(cr.itv, r25, r17);;
+	RESTORE_REG(cr.pmv, r25, r17);;
+	RESTORE_REG(cr.cmcv, r25, r17);;
+	RESTORE_REG(cr.lrr0, r25, r17);;
+	RESTORE_REG(cr.lrr1, r25, r17);;
+	ld8 r4=[r25],0x08;;
+	ld8 r5=[r25],0x08;;
+	ld8 r6=[r25],0x08;;
+	ld8 r7=[r25],0x08;;
+	ld8 r17=[r25],0x08;;
+	mov pr=r17,-1;;
+	RESTORE_REG(ar.lc, r25, r17);;
+	/*
+	 * Now restore the floating point regs
+	 */
+	ldf.fill.nta f2=[r25],16;;
+	ldf.fill.nta f3=[r25],16;;
+	ldf.fill.nta f4=[r25],16;;
+	ldf.fill.nta f5=[r25],16;;
+	ldf.fill.nta f16=[r25],16;;
+	ldf.fill.nta f17=[r25],16;;
+	ldf.fill.nta f18=[r25],16;;
+	ldf.fill.nta f19=[r25],16;;
+	ldf.fill.nta f20=[r25],16;;
+	ldf.fill.nta f21=[r25],16;;
+	ldf.fill.nta f22=[r25],16;;
+	ldf.fill.nta f23=[r25],16;;
+	ldf.fill.nta f24=[r25],16;;
+	ldf.fill.nta f25=[r25],16;;
+	ldf.fill.nta f26=[r25],16;;
+	ldf.fill.nta f27=[r25],16;;
+	ldf.fill.nta f28=[r25],16;;
+	ldf.fill.nta f29=[r25],16;;
+	ldf.fill.nta f30=[r25],16;;
+	ldf.fill.nta f31=[r25],16;;
+
+	/*
+	 * Now that we have done all the register restores
+	 * we are now ready for the big DIVE to SAL Land
+	 */
+	ssm psr.ic;;
+	srlz.d;;
+	br.ret.sptk.many b0;;
+END(ia64_jump_to_sal)
+#endif /* CONFIG_HOTPLUG_CPU */
+
 #endif /* CONFIG_SMP */
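Aside on SET_ONE_RR above: it packs three fields into a region-register value, the region ID into bits 8-31, the page-size code into bits 2-7, and the VHPT-enable bit into bit 0. The user-space sketch below mirrors that arithmetic. It assumes ia64_rid(ctx, addr) == (ctx << 3) | (addr >> 61), as defined in include/asm-ia64/mmu_context.h (not shown in this patch), and illustrative shift values (16KB pages, PAGE_SHIFT 14; IA64_GRANULE_SHIFT 24); the printed values are examples, not authoritative:

#include <stdio.h>
#include <inttypes.h>

#define IA64_REGION_ID_KERNEL	0	/* as in the kernel headers */

/* assumed definition, per include/asm-ia64/mmu_context.h */
static uint64_t ia64_rid(uint64_t ctx, uint64_t region_addr)
{
	return (ctx << 3) | (region_addr >> 61);
}

/* the same expression SET_ONE_RR feeds into "mov rr[_tmp1]=_tmp2" */
static uint64_t rr_value(uint64_t num, uint64_t pgshift, uint64_t vhpt)
{
	return (ia64_rid(IA64_REGION_ID_KERNEL, num << 61) << 8)
	       | (pgshift << 2) | vhpt;
}

int main(void)
{
	uint64_t num;

	for (num = 0; num < 6; num++)	/* regions 0-5: VHPT enabled */
		printf("rr[%" PRIu64 "] = %#" PRIx64 "\n",
		       num, rr_value(num, 14, 1));
	for (num = 6; num < 8; num++)	/* regions 6-7: granule-sized, no VHPT */
		printf("rr[%" PRIu64 "] = %#" PRIx64 "\n",
		       num, rr_value(num, 24, 0));
	return 0;
}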
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index cf3f8014f9a..ef3fd7265b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -110,46 +110,19 @@
 	.global ia64_os_mca_dispatch_end
 	.global ia64_sal_to_os_handoff_state
 	.global ia64_os_to_sal_handoff_state
+	.global ia64_do_tlb_purge
 
 	.text
 	.align 16
 
-ia64_os_mca_dispatch:
-
-	// Serialize all MCA processing
-	mov	r3=1;;
-	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
-	xchg8	r4=[r2],r3;;
-	cmp.ne	p6,p0=r4,r0
-(p6)	br ia64_os_mca_spin
-
-	// Save the SAL to OS MCA handoff state as defined
-	// by SAL SPEC 3.0
-	// NOTE : The order in which the state gets saved
-	//	  is dependent on the way the C-structure
-	//	  for ia64_mca_sal_to_os_state_t has been
-	//	  defined in include/asm/mca.h
-	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
-	;;
-
-	// LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
-	br	ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
-	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
-	;;
-	ld8 r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
-	;;
-	tbit.nz p6,p7=r18,60
-(p7)	br.spnt done_tlb_purge_and_reload
-
-	// The following code purges TC and TR entries. Then reload all TC entries.
-	// Purge percpu data TC entries.
-begin_tlb_purge_and_reload:
+/*
+ * Just the TLB purge part is moved into a separate function so that it
+ * can be reused by the CPU hotplug code as well.  The caller is now
+ * expected to set up b1, so we can branch back once the TLB flush is
+ * complete.
+ */
+ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
 	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
@@ -230,6 +203,51 @@ begin_tlb_purge_and_reload:
 	;;
 	srlz.i
 	;;
+	// Now branch away to caller.
+	br.sptk.many b1
+	;;
+
+ia64_os_mca_dispatch:
+
+	// Serialize all MCA processing
+	mov	r3=1;;
+	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
+ia64_os_mca_spin:
+	xchg8	r4=[r2],r3;;
+	cmp.ne	p6,p0=r4,r0
+(p6)	br ia64_os_mca_spin
+
+	// Save the SAL to OS MCA handoff state as defined
+	// by SAL SPEC 3.0
+	// NOTE : The order in which the state gets saved
+	//	  is dependent on the way the C-structure
+	//	  for ia64_mca_sal_to_os_state_t has been
+	//	  defined in include/asm/mca.h
+	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+	;;
+
+	// LOG PROCESSOR STATE INFO FROM HERE ON..
+begin_os_mca_dump:
+	br	ia64_os_mca_proc_state_dump;;
+
+ia64_os_mca_done_dump:
+
+	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+	;;
+	ld8 r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
+	;;
+	tbit.nz p6,p7=r18,60
+(p7)	br.spnt done_tlb_purge_and_reload
+
+	// The following code purges TC and TR entries. Then reload all TC entries.
+	// Purge percpu data TC entries.
+begin_tlb_purge_and_reload:
+	movl r18=ia64_reload_tr;;
+	LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
+	mov b1=r18;;
+	br.sptk.many ia64_do_tlb_purge;;
+
+ia64_reload_tr:
 	// Finally reload the TR registers.
 	// 1. Reload DTR/ITR registers for kernel.
 	mov r18=KERNEL_TR_PAGE_SHIFT<<2
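The b1 convention introduced above amounts to an explicit continuation: ia64_do_tlb_purge runs in physical mode, where the normal call/return machinery (b0, the RSE) cannot be trusted, so each caller parks its resume address in b1 and the purge code branches there when done. A rough, compilable C analogue (a sketch only; all names here are stand-ins, not kernel code):

typedef void (*continuation_t)(void);

/* models ia64_do_tlb_purge: purge, then branch to whatever the caller
 * parked in b1 */
static void do_tlb_purge(continuation_t b1)
{
	/* ... purge TC and TR entries (the assembly above) ... */
	b1();			/* "br.sptk.many b1" */
}

static void reload_tr(void)   { /* MCA path: ia64_reload_tr */ }
static void purge_done(void)  { /* hotplug path: tlb_purge_done in head.S */ }

int main(void)
{
	do_tlb_purge(reload_tr);	/* mov b1=ia64_reload_tr; br ia64_do_tlb_purge */
	do_tlb_purge(purge_done);	/* mov b1=tlb_purge_done; br ia64_do_tlb_purge */
	return 0;
}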
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 91293388dd2..7c43aea5f7f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
  */
 #define __KERNEL_SYSCALLS__	/* see <asm/unistd.h> */
 #include <linux/config.h>
@@ -200,27 +201,20 @@ default_idle (void)
 static inline void play_dead(void)
 {
 	extern void ia64_cpu_local_tick (void);
+	unsigned int this_cpu = smp_processor_id();
+
 	/* Ack it */
 	__get_cpu_var(cpu_state) = CPU_DEAD;
 
-	/* We shouldn't have to disable interrupts while dead, but
-	 * some interrupts just don't seem to go away, and this makes
-	 * it "work" for testing purposes. */
 	max_xtp();
 	local_irq_disable();
-	/* Death loop */
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-		cpu_relax();
-
+	idle_task_exit();
+	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
 	/*
-	 * Enable timer interrupts from now on
-	 * Not required if we put processor in SAL_BOOT_RENDEZ mode.
+	 * The above is a point of no return; the processor is
+	 * expected to be in the SAL loop now.
 	 */
-	local_flush_tlb_all();
-	cpu_set(smp_processor_id(), cpu_online_map);
-	wmb();
-	ia64_cpu_local_tick ();
-	local_irq_enable();
+	BUG();
 }
 #else
 static inline void play_dead(void)
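The new play_dead()/__cpu_die() pairing (the latter appears in the smpboot.c diff below) forms a small handshake: the dying CPU publishes CPU_DEAD before jumping to SAL, and a surviving CPU polls for that ack with a bounded wait. A compilable miniature of just that ordering (per-CPU state, msleep() and the SAL jump are stubbed out; this is a sketch, not kernel code):

#include <stdio.h>

#define CPU_DEAD 1
static volatile int cpu_state[4];	/* stand-in for the per-CPU variable */

static void play_dead(int cpu)
{
	cpu_state[cpu] = CPU_DEAD;	/* the ack __cpu_die() waits for */
	/* real code: max_xtp(); local_irq_disable(); idle_task_exit();
	 * ia64_jump_to_sal(&sal_boot_rendez_state[cpu]); never returns */
}

static void cpu_die(int cpu)		/* models __cpu_die() */
{
	int i;

	for (i = 0; i < 100; i++) {	/* real code sleeps 100ms per try */
		if (cpu_state[cpu] == CPU_DEAD) {
			printf("CPU %d is now offline\n", cpu);
			return;
		}
	}
	fprintf(stderr, "CPU %d didn't die...\n", cpu);
}

int main(void)
{
	play_dead(1);	/* in reality these run on two different CPUs */
	cpu_die(1);
	return 0;
}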
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 5318f0cbfc2..ca1536db339 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -9,6 +9,7 @@
  * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
  *						smp_boot_cpus()/smp_commence() is replaced by
  *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
+ * 04/06/21 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
  */
 #include <linux/config.h>
 
@@ -58,6 +59,37 @@
 #define Dprintk(x...)
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Store all idle threads; these can be reused instead of creating a new
+ * thread.  This also avoids complicated thread-destroy functionality
+ * for idle threads.
+ */
+struct task_struct *idle_thread_array[NR_CPUS];
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+/*
+ * start_ap in head.S uses this to store the current booting
+ * cpu's info.
+ */
+struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
+
+#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
+
+#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
+#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
+
+#else
+
+#define get_idle_for_cpu(x)	(NULL)
+#define set_idle_for_cpu(x,p)
+#define set_brendez_area(x)
+#endif
+
 
 /*
  * ITC synchronization related stuff:
@@ -345,7 +377,6 @@ start_secondary (void *unused)
 {
 	/* Early console may use I/O ports */
 	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-
 	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
 	efi_map_pal_code();
 	cpu_init();
@@ -384,6 +415,13 @@ do_boot_cpu (int sapicid, int cpu)
 		.done	= COMPLETION_INITIALIZER(c_idle.done),
 	};
 	DECLARE_WORK(work, do_fork_idle, &c_idle);
+
+	c_idle.idle = get_idle_for_cpu(cpu);
+	if (c_idle.idle) {
+		init_idle(c_idle.idle, cpu);
+		goto do_rest;
+	}
+
 	/*
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
@@ -396,10 +434,15 @@ do_boot_cpu (int sapicid, int cpu)
 
 	if (IS_ERR(c_idle.idle))
 		panic("failed fork for CPU %d", cpu);
+
+	set_idle_for_cpu(cpu, c_idle.idle);
+
+do_rest:
 	task_for_booting_cpu = c_idle.idle;
 
 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
 
+	set_brendez_area(cpu);
 	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
 
 	/*
@@ -555,16 +598,6 @@ void __devinit smp_prepare_boot_cpu(void)
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(void);
 /* must be called with cpucontrol mutex held */
-static int __devinit cpu_enable(unsigned int cpu)
-{
-	per_cpu(cpu_state,cpu) = CPU_UP_PREPARE;
-	wmb();
-
-	while (!cpu_online(cpu))
-		cpu_relax();
-	return 0;
-}
-
 int __cpu_disable(void)
 {
 	int cpu = smp_processor_id();
@@ -577,7 +610,7 @@ int __cpu_disable(void)
 
 	fixup_irqs();
 	local_flush_tlb_all();
-	printk ("Disabled cpu %u\n", smp_processor_id());
+	cpu_clear(cpu, cpu_callin_map);
 	return 0;
 }
 
@@ -589,12 +622,7 @@ void __cpu_die(unsigned int cpu)
 
 		/* They ack this in play_dead by setting CPU_DEAD */
 		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			/*
-			 * TBD: Enable this when physical removal
-			 * or when the processor is put in
-			 * SAL_BOOT_RENDEZ mode
-			 * cpu_clear(cpu, cpu_callin_map);
-			 */
+			printk ("CPU %d is now offline\n", cpu);
 			return;
 		}
 		msleep(100);
@@ -602,11 +630,6 @@ void __cpu_die(unsigned int cpu)
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
 #else /* !CONFIG_HOTPLUG_CPU */
-static int __devinit cpu_enable(unsigned int cpu)
-{
-	return 0;
-}
-
 int __cpu_disable(void)
 {
 	return -ENOSYS;
@@ -648,16 +671,12 @@ __cpu_up (unsigned int cpu)
 		return -EINVAL;
 
 	/*
-	 * Already booted.. just enable and get outa idle lool
+	 * Already booted cpu? Not valid anymore, since we don't
+	 * do the idle-loop tight spin anymore.
 	 */
 	if (cpu_isset(cpu, cpu_callin_map))
-	{
-		cpu_enable(cpu);
-		local_irq_enable();
-		while (!cpu_isset(cpu, cpu_online_map))
-			mb();
-		return 0;
-	}
+		return -EINVAL;
+
 	/* Processor goes to start_secondary(), sets online flag */
 	ret = do_boot_cpu(sapicid, cpu);
 	if (ret < 0)
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index ea1ed377de4..240676f7539 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -832,6 +832,44 @@ extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
 				   u64, u64, u64, u64, u64);
 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
 				      u64, u64, u64, u64, u64);
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * System Abstraction Layer Specification
+ * Section 3.2.5.1: OS_BOOT_RENDEZ to SAL return State.
+ * Note: region regs are stored first in head.S start_ap. Hence they must
+ * stay up front.
+ */
+struct sal_to_os_boot {
+	u64 rr[8];		/* Region Registers */
+	u64 br[6];		/* br0: return addr into SAL boot rendez routine */
+	u64 gr1;		/* SAL:GP */
+	u64 gr12;		/* SAL:SP */
+	u64 gr13;		/* SAL: Task Pointer */
+	u64 fpsr;
+	u64 pfs;
+	u64 rnat;
+	u64 unat;
+	u64 bspstore;
+	u64 dcr;		/* Default Control Register */
+	u64 iva;
+	u64 pta;
+	u64 itv;
+	u64 pmv;
+	u64 cmcv;
+	u64 lrr[2];
+	u64 gr[4];
+	u64 pr;			/* Predicate registers */
+	u64 lc;			/* Loop Count */
+	struct ia64_fpreg fp[20];
+};
+
+/*
+ * Global array allocated for NR_CPUS at boot time
+ */
+extern struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
+
+extern void ia64_jump_to_sal(struct sal_to_os_boot *);
+#endif
 
 extern void ia64_sal_handler_init(void *entry_point, void *gpval);
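The restore path in head.S walks struct sal_to_os_boot in declaration order with 8-byte loads, and spills/fills the FP registers with stf.spill/ldf.fill, which require 16-byte-aligned addresses. A user-space mock of the layout can sanity-check the two invariants the comments above call out. The types here are simplified stand-ins and the check itself is not part of the patch:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;

/* simplified stand-in for the kernel's struct ia64_fpreg
 * (16 bytes, 16-byte aligned) */
struct ia64_fpreg { u64 bits[2]; } __attribute__((aligned(16)));

struct sal_to_os_boot {			/* mirrors the hunk above */
	u64 rr[8];
	u64 br[6];
	u64 gr1, gr12, gr13;
	u64 fpsr, pfs, rnat, unat, bspstore;
	u64 dcr, iva, pta, itv, pmv, cmcv;
	u64 lrr[2];
	u64 gr[4];
	u64 pr, lc;
	struct ia64_fpreg fp[20];
};

/* 36 u64 fields precede fp[], so it falls at offset 288, a multiple of 16. */
_Static_assert(offsetof(struct sal_to_os_boot, rr) == 0,
	       "region registers must stay first; head.S stores them first");
_Static_assert(offsetof(struct sal_to_os_boot, fp) % 16 == 0,
	       "fp[] must stay 16-byte aligned for stf.spill/ldf.fill");

int main(void) { return 0; }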