Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/kernel/Makefile      |   15
-rw-r--r-- | arch/powerpc/kernel/asm-offsets.c |    1
-rw-r--r-- | arch/powerpc/kernel/btext.c       |    3
-rw-r--r-- | arch/powerpc/kernel/head_64.S     |    1
-rw-r--r-- | arch/powerpc/kernel/ppc_ksyms.c   |  101
-rw-r--r-- | arch/powerpc/kernel/prom.c        |    6
-rw-r--r-- | arch/powerpc/kernel/setup_64.c    | 1324
-rw-r--r-- | arch/powerpc/lib/Makefile         |    4
-rw-r--r-- | arch/powerpc/lib/locks.c          |   95
9 files changed, 1481 insertions, 69 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index bc063edd6de..47a8eb6e7e3 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -10,6 +10,12 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC endif +obj-y := semaphore.o traps.o +obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o +obj-$(CONFIG_POWER4) += idle_power4.o + +ifeq ($(CONFIG_PPC_MERGE),y) + extra-$(CONFIG_PPC_STD_MMU) := head_32.o extra-$(CONFIG_PPC64) := head_64.o extra-$(CONFIG_40x) := head_4xx.o @@ -21,15 +27,12 @@ extra-$(CONFIG_PPC64) += entry_64.o extra-$(CONFIG_PPC_FPU) += fpu.o extra-y += vmlinux.lds -obj-y += traps.o prom.o semaphore.o +obj-y += process.o init_task.o \ + prom.o systbl.o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o -obj-$(CONFIG_PPC64) += idle_power4.o -obj-$(CONFIG_PPC64) += misc_64.o -ifeq ($(CONFIG_PPC32),y) +obj-$(CONFIG_PPC64) += setup_64.o misc_64.o obj-$(CONFIG_PPC_OF) += prom_init.o of_device.o obj-$(CONFIG_MODULES) += ppc_ksyms.o -endif -obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o obj-$(CONFIG_BOOTX_TEXT) += btext.o ifeq ($(CONFIG_PPC_ISERIES),y) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 3a247c033e8..ddf0c81e195 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -111,6 +111,7 @@ int main(void) DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); + DEFINE(PLATFORM_LPAR, PLATFORM_LPAR); /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 44f5d98e27c..bdfba92b2b3 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -50,7 +50,7 @@ static unsigned char vga_font[cmapsz]; int boot_text_mapped; int force_printk_to_btext = 0; - +#ifdef CONFIG_PPC32 /* Calc BAT values for mapping the display and store them * in disp_BAT. Those values are then used from head.S to map * the display during identify_machine() and MMU_Init() @@ -93,6 +93,7 @@ btext_prepare_BAT(void) } logicalDisplayBase = (void *) (vaddr + lowbits); } +#endif /* This function will enable the early boot text when doing OF booting. This * way, xmon output should work too diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 3fcac3c37b9..a4ceb9ae20f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -746,6 +746,7 @@ bad_stack: * any task or sent any task a signal, you should use * ret_from_except or ret_from_except_lite instead of this. 
*/ + .globl fast_exception_return fast_exception_return: ld r12,_MSR(r1) ld r11,_NIP(r1) diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 91a562e3257..010554e5fe4 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -51,6 +51,7 @@ #include <asm/commproc.h> #endif +#ifdef CONFIG_PPC32 extern void transfer_to_handler(void); extern void do_IRQ(struct pt_regs *regs); extern void machine_check_exception(struct pt_regs *regs); @@ -61,14 +62,12 @@ extern int do_signal(sigset_t *, struct pt_regs *); extern int pmac_newworld; extern int sys_sigreturn(struct pt_regs *regs); -long long __ashrdi3(long long, int); -long long __ashldi3(long long, int); -long long __lshrdi3(long long, int); - -extern unsigned long mm_ptov (unsigned long paddr); - EXPORT_SYMBOL(clear_pages); -EXPORT_SYMBOL(clear_user_page); +EXPORT_SYMBOL(ISA_DMA_THRESHOLD); +EXPORT_SYMBOL(DMA_MODE_READ); +EXPORT_SYMBOL(DMA_MODE_WRITE); +EXPORT_SYMBOL(__div64_32); + EXPORT_SYMBOL(do_signal); EXPORT_SYMBOL(transfer_to_handler); EXPORT_SYMBOL(do_IRQ); @@ -77,12 +76,8 @@ EXPORT_SYMBOL(alignment_exception); EXPORT_SYMBOL(program_check_exception); EXPORT_SYMBOL(single_step_exception); EXPORT_SYMBOL(sys_sigreturn); -EXPORT_SYMBOL(ppc_n_lost_interrupts); -EXPORT_SYMBOL(ppc_lost_interrupts); +#endif -EXPORT_SYMBOL(ISA_DMA_THRESHOLD); -EXPORT_SYMBOL(DMA_MODE_READ); -EXPORT_SYMBOL(DMA_MODE_WRITE); #if defined(CONFIG_PPC_PREP) EXPORT_SYMBOL(_prep_type); EXPORT_SYMBOL(ucSystemType); @@ -110,7 +105,6 @@ EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); -EXPORT_SYMBOL(__div64_32); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); @@ -132,21 +126,21 @@ EXPORT_SYMBOL(_insw_ns); EXPORT_SYMBOL(_outsw_ns); EXPORT_SYMBOL(_insl_ns); EXPORT_SYMBOL(_outsl_ns); -EXPORT_SYMBOL(iopa); -EXPORT_SYMBOL(mm_ptov); EXPORT_SYMBOL(ioremap); #ifdef CONFIG_44x EXPORT_SYMBOL(ioremap64); #endif EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(iounmap); +#ifdef CONFIG_PPC32 EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ +#endif -#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) +#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)) EXPORT_SYMBOL(ppc_ide_md); #endif -#ifdef CONFIG_PCI +#if defined(CONFIG_PCI) && defined(CONFIG_PPC32) EXPORT_SYMBOL(isa_io_base); EXPORT_SYMBOL(isa_mem_base); EXPORT_SYMBOL(pci_dram_offset); @@ -168,31 +162,31 @@ EXPORT_SYMBOL(flush_dcache_all); EXPORT_SYMBOL(start_thread); EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(flush_instruction_cache); EXPORT_SYMBOL(giveup_fpu); +#ifdef CONFIG_ALTIVEC +EXPORT_SYMBOL(giveup_altivec); +#endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_SPE +EXPORT_SYMBOL(giveup_spe); +#endif /* CONFIG_SPE */ + #ifdef CONFIG_PPC64 EXPORT_SYMBOL(__flush_icache_range); #else +EXPORT_SYMBOL(flush_instruction_cache); EXPORT_SYMBOL(flush_icache_range); -#endif -EXPORT_SYMBOL(flush_dcache_range); -EXPORT_SYMBOL(flush_icache_user_range); -EXPORT_SYMBOL(flush_dcache_page); EXPORT_SYMBOL(flush_tlb_kernel_range); EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(_tlbie); -#ifdef CONFIG_ALTIVEC -EXPORT_SYMBOL(giveup_altivec); -#endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_SPE -EXPORT_SYMBOL(giveup_spe); -#endif /* CONFIG_SPE */ +#endif +EXPORT_SYMBOL(flush_dcache_range); + #ifdef CONFIG_SMP EXPORT_SYMBOL(smp_call_function); +#ifdef CONFIG_PPC32 EXPORT_SYMBOL(smp_hw_index); #endif - -EXPORT_SYMBOL(ppc_md); +#endif #ifdef CONFIG_ADB EXPORT_SYMBOL(adb_request); @@ 
-205,25 +199,27 @@ EXPORT_SYMBOL(adb_try_handler_change); EXPORT_SYMBOL(cuda_request); EXPORT_SYMBOL(cuda_poll); #endif /* CONFIG_ADB_CUDA */ -#ifdef CONFIG_PPC_MULTIPLATFORM +#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32) EXPORT_SYMBOL(_machine); #endif #ifdef CONFIG_PPC_PMAC EXPORT_SYMBOL(sys_ctrler); -EXPORT_SYMBOL(pmac_newworld); #endif #ifdef CONFIG_VT EXPORT_SYMBOL(kd_mksound); #endif EXPORT_SYMBOL(to_tm); -EXPORT_SYMBOL(pm_power_off); - +#ifdef CONFIG_PPC32 +long long __ashrdi3(long long, int); +long long __ashldi3(long long, int); +long long __lshrdi3(long long, int); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); +#endif + EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(cacheable_memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memscan); @@ -234,17 +230,14 @@ EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(screen_info); #endif +#ifdef CONFIG_PPC32 +EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); -EXPORT_SYMBOL(get_wchan); EXPORT_SYMBOL(console_drivers); - -#ifdef CONFIG_PPC_ISERIES -EXPORT_SYMBOL(local_irq_disable); -EXPORT_SYMBOL(local_irq_enable); -EXPORT_SYMBOL(local_get_flags); +EXPORT_SYMBOL(cacheable_memcpy); #endif #ifdef CONFIG_XMON @@ -255,22 +248,6 @@ EXPORT_SYMBOL(__up); EXPORT_SYMBOL(__down); EXPORT_SYMBOL(__down_interruptible); -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) -extern void (*debugger)(struct pt_regs *regs); -extern int (*debugger_bpt)(struct pt_regs *regs); -extern int (*debugger_sstep)(struct pt_regs *regs); -extern int (*debugger_iabr_match)(struct pt_regs *regs); -extern int (*debugger_dabr_match)(struct pt_regs *regs); -extern void (*debugger_fault_handler)(struct pt_regs *regs); - -EXPORT_SYMBOL(debugger); -EXPORT_SYMBOL(debugger_bpt); -EXPORT_SYMBOL(debugger_sstep); -EXPORT_SYMBOL(debugger_iabr_match); -EXPORT_SYMBOL(debugger_dabr_match); -EXPORT_SYMBOL(debugger_fault_handler); -#endif - #ifdef CONFIG_8xx EXPORT_SYMBOL(cpm_install_handler); EXPORT_SYMBOL(cpm_free_handler); @@ -280,22 +257,24 @@ EXPORT_SYMBOL(cpm_free_handler); EXPORT_SYMBOL(__res); #endif +#ifdef CONFIG_PPC32 EXPORT_SYMBOL(next_mmu_context); EXPORT_SYMBOL(set_context); -EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */ EXPORT_SYMBOL(disarm_decr); -#ifdef CONFIG_PPC_STD_MMU +#endif + +#ifdef CONFIG_PPC_STD_MMU_32 extern long mol_trampoline; EXPORT_SYMBOL(mol_trampoline); /* For MOL */ EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ +EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */ #ifdef CONFIG_SMP extern int mmu_hash_lock; EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ #endif /* CONFIG_SMP */ extern long *intercept_table; EXPORT_SYMBOL(intercept_table); -#endif /* CONFIG_PPC_STD_MMU */ -EXPORT_SYMBOL(cur_cpu_spec); +#endif /* CONFIG_PPC_STD_MMU_32 */ #ifdef CONFIG_PPC_PMAC extern unsigned long agp_special_page; EXPORT_SYMBOL(agp_special_page); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index dc3d24ea3bf..ce0dff1caa8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -47,6 +47,10 @@ #include <asm/sections.h> #include <asm/machdep.h> #include <asm/pSeries_reconfig.h> +#include <asm/pci-bridge.h> +#ifdef CONFIG_PPC64 +#include <asm/systemcfg.h> +#endif #ifdef DEBUG #define DBG(fmt...) 
printk(KERN_ERR fmt) @@ -1072,7 +1076,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, } else { /* Check if it's the boot-cpu, set it's hw index in paca now */ if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) { - u32 *prop = get_flat_dt_prop(node, "reg", NULL); + prop = get_flat_dt_prop(node, "reg", NULL); set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop); boot_cpuid_phys = get_hard_smp_processor_id(0); } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c new file mode 100644 index 00000000000..212b00823f8 --- /dev/null +++ b/arch/powerpc/kernel/setup_64.c @@ -0,0 +1,1324 @@ +/* + * + * Common boot and setup code. + * + * Copyright (C) 2001 PPC64 Team, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#undef DEBUG + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/sched.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/reboot.h> +#include <linux/delay.h> +#include <linux/initrd.h> +#include <linux/ide.h> +#include <linux/seq_file.h> +#include <linux/ioport.h> +#include <linux/console.h> +#include <linux/utsname.h> +#include <linux/tty.h> +#include <linux/root_dev.h> +#include <linux/notifier.h> +#include <linux/cpu.h> +#include <linux/unistd.h> +#include <linux/serial.h> +#include <linux/serial_8250.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/processor.h> +#include <asm/pgtable.h> +#include <asm/bootinfo.h> +#include <asm/smp.h> +#include <asm/elf.h> +#include <asm/machdep.h> +#include <asm/paca.h> +#include <asm/ppcdebug.h> +#include <asm/time.h> +#include <asm/cputable.h> +#include <asm/sections.h> +#include <asm/btext.h> +#include <asm/nvram.h> +#include <asm/setup.h> +#include <asm/system.h> +#include <asm/rtas.h> +#include <asm/iommu.h> +#include <asm/serial.h> +#include <asm/cache.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/lmb.h> +#include <asm/iSeries/ItLpNaca.h> +#include <asm/firmware.h> +#include <asm/systemcfg.h> + +#ifdef DEBUG +#define DBG(fmt...) udbg_printf(fmt) +#else +#define DBG(fmt...) +#endif + +/* + * Here are some early debugging facilities. 
You can enable one + * but your kernel will not boot on anything else if you do so + */ + +/* This one is for use on LPAR machines that support an HVC console + * on vterm 0 + */ +extern void udbg_init_debug_lpar(void); +/* This one is for use on Apple G5 machines + */ +extern void udbg_init_pmac_realmode(void); +/* That's RTAS panel debug */ +extern void call_rtas_display_status_delay(unsigned char c); +/* Here's maple real mode debug */ +extern void udbg_init_maple_realmode(void); + +#define EARLY_DEBUG_INIT() do {} while(0) + +#if 0 +#define EARLY_DEBUG_INIT() udbg_init_debug_lpar() +#define EARLY_DEBUG_INIT() udbg_init_maple_realmode() +#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode() +#define EARLY_DEBUG_INIT() \ + do { udbg_putc = call_rtas_display_status_delay; } while(0) +#endif + +/* extern void *stab; */ +extern unsigned long klimit; + +extern void mm_init_ppc64(void); +extern void stab_initialize(unsigned long stab); +extern void htab_initialize(void); +extern void early_init_devtree(void *flat_dt); +extern void unflatten_device_tree(void); + +extern void smp_release_cpus(void); + +int have_of = 1; +int boot_cpuid = 0; +int boot_cpuid_phys = 0; +dev_t boot_dev; +u64 ppc64_pft_size; + +struct ppc64_caches ppc64_caches; +EXPORT_SYMBOL_GPL(ppc64_caches); + +/* + * These are used in binfmt_elf.c to put aux entries on the stack + * for each elf executable being started. + */ +int dcache_bsize; +int icache_bsize; +int ucache_bsize; + +/* The main machine-dep calls structure + */ +struct machdep_calls ppc_md; +EXPORT_SYMBOL(ppc_md); + +#ifdef CONFIG_MAGIC_SYSRQ +unsigned long SYSRQ_KEY; +#endif /* CONFIG_MAGIC_SYSRQ */ + + +static int ppc64_panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block ppc64_panic_block = { + .notifier_call = ppc64_panic_event, + .priority = INT_MIN /* may not return; must be done last */ +}; + +/* + * Perhaps we can put the pmac screen_info[] here + * on pmac as well so we don't need the ifdef's. + * Until we get multiple-console support in here + * that is. -- Cort + * Maybe tie it to serial consoles, since this is really what + * these processors use on existing boards. 
-- Dan + */ +struct screen_info screen_info = { + .orig_x = 0, + .orig_y = 25, + .orig_video_cols = 80, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; + +#ifdef CONFIG_SMP + +static int smt_enabled_cmdline; + +/* Look for ibm,smt-enabled OF option */ +static void check_smt_enabled(void) +{ + struct device_node *dn; + char *smt_option; + + /* Allow the command line to overrule the OF option */ + if (smt_enabled_cmdline) + return; + + dn = of_find_node_by_path("/options"); + + if (dn) { + smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL); + + if (smt_option) { + if (!strcmp(smt_option, "on")) + smt_enabled_at_boot = 1; + else if (!strcmp(smt_option, "off")) + smt_enabled_at_boot = 0; + } + } +} + +/* Look for smt-enabled= cmdline option */ +static int __init early_smt_enabled(char *p) +{ + smt_enabled_cmdline = 1; + + if (!p) + return 0; + + if (!strcmp(p, "on") || !strcmp(p, "1")) + smt_enabled_at_boot = 1; + else if (!strcmp(p, "off") || !strcmp(p, "0")) + smt_enabled_at_boot = 0; + + return 0; +} +early_param("smt-enabled", early_smt_enabled); + +/** + * setup_cpu_maps - initialize the following cpu maps: + * cpu_possible_map + * cpu_present_map + * cpu_sibling_map + * + * Having the possible map set up early allows us to restrict allocations + * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. + * + * We do not initialize the online map here; cpus set their own bits in + * cpu_online_map as they come up. + * + * This function is valid only for Open Firmware systems. finish_device_tree + * must be called before using this. + * + * While we're here, we may as well set the "physical" cpu ids in the paca. + */ +static void __init setup_cpu_maps(void) +{ + struct device_node *dn = NULL; + int cpu = 0; + int swap_cpuid = 0; + + check_smt_enabled(); + + while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { + u32 *intserv; + int j, len = sizeof(u32), nthreads; + + intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s", + &len); + if (!intserv) + intserv = (u32 *)get_property(dn, "reg", NULL); + + nthreads = len / sizeof(u32); + + for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { + cpu_set(cpu, cpu_present_map); + set_hard_smp_processor_id(cpu, intserv[j]); + + if (intserv[j] == boot_cpuid_phys) + swap_cpuid = cpu; + cpu_set(cpu, cpu_possible_map); + cpu++; + } + } + + /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that + * boot cpu is logical 0. + */ + if (boot_cpuid_phys != get_hard_smp_processor_id(0)) { + u32 tmp; + tmp = get_hard_smp_processor_id(0); + set_hard_smp_processor_id(0, boot_cpuid_phys); + set_hard_smp_processor_id(swap_cpuid, tmp); + } + + /* + * On pSeries LPAR, we need to know how many cpus + * could possibly be added to this partition. 
+ */ + if (systemcfg->platform == PLATFORM_PSERIES_LPAR && + (dn = of_find_node_by_path("/rtas"))) { + int num_addr_cell, num_size_cell, maxcpus; + unsigned int *ireg; + + num_addr_cell = prom_n_addr_cells(dn); + num_size_cell = prom_n_size_cells(dn); + + ireg = (unsigned int *) + get_property(dn, "ibm,lrdr-capacity", NULL); + + if (!ireg) + goto out; + + maxcpus = ireg[num_addr_cell + num_size_cell]; + + /* Double maxcpus for processors which have SMT capability */ + if (cpu_has_feature(CPU_FTR_SMT)) + maxcpus *= 2; + + if (maxcpus > NR_CPUS) { + printk(KERN_WARNING + "Partition configured for %d cpus, " + "operating system maximum is %d.\n", + maxcpus, NR_CPUS); + maxcpus = NR_CPUS; + } else + printk(KERN_INFO "Partition configured for %d cpus.\n", + maxcpus); + + for (cpu = 0; cpu < maxcpus; cpu++) + cpu_set(cpu, cpu_possible_map); + out: + of_node_put(dn); + } + + /* + * Do the sibling map; assume only two threads per processor. + */ + for_each_cpu(cpu) { + cpu_set(cpu, cpu_sibling_map[cpu]); + if (cpu_has_feature(CPU_FTR_SMT)) + cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); + } + + systemcfg->processorCount = num_present_cpus(); +} +#endif /* CONFIG_SMP */ + +extern struct machdep_calls pSeries_md; +extern struct machdep_calls pmac_md; +extern struct machdep_calls maple_md; +extern struct machdep_calls bpa_md; +extern struct machdep_calls iseries_md; + +/* Ultimately, stuff them in an elf section like initcalls... */ +static struct machdep_calls __initdata *machines[] = { +#ifdef CONFIG_PPC_PSERIES + &pSeries_md, +#endif /* CONFIG_PPC_PSERIES */ +#ifdef CONFIG_PPC_PMAC + &pmac_md, +#endif /* CONFIG_PPC_PMAC */ +#ifdef CONFIG_PPC_MAPLE + &maple_md, +#endif /* CONFIG_PPC_MAPLE */ +#ifdef CONFIG_PPC_BPA + &bpa_md, +#endif +#ifdef CONFIG_PPC_ISERIES + &iseries_md, +#endif + NULL +}; + +/* + * Early initialization entry point. This is called by head.S + * with MMU translation disabled. We rely on the "feature" of + * the CPU that ignores the top 2 bits of the address in real + * mode so we can access kernel globals normally provided we + * only toy with things in the RMO region. From here, we do + * some early parsing of the device-tree to setup out LMB + * data structures, and allocate & initialize the hash table + * and segment tables so we can start running with translation + * enabled. + * + * It is this function which will call the probe() callback of + * the various platform types and copy the matching one to the + * global ppc_md structure. Your platform can eventually do + * some very early initializations from the probe() routine, but + * this is not recommended, be very careful as, for example, the + * device-tree is not accessible via normal means at this point. + */ + +void __init early_setup(unsigned long dt_ptr) +{ + struct paca_struct *lpaca = get_paca(); + static struct machdep_calls **mach; + + /* + * Enable early debugging if any specified (see top of + * this file) + */ + EARLY_DEBUG_INIT(); + + DBG(" -> early_setup()\n"); + + /* + * Fill the default DBG level (do we want to keep + * that old mecanism around forever ?) 
+ */ + ppcdbg_initialize(); + + /* + * Do early initializations using the flattened device + * tree, like retreiving the physical memory map or + * calculating/retreiving the hash table size + */ + early_init_devtree(__va(dt_ptr)); + + /* + * Iterate all ppc_md structures until we find the proper + * one for the current machine type + */ + DBG("Probing machine type for platform %x...\n", + systemcfg->platform); + + for (mach = machines; *mach; mach++) { + if ((*mach)->probe(systemcfg->platform)) + break; + } + /* What can we do if we didn't find ? */ + if (*mach == NULL) { + DBG("No suitable machine found !\n"); + for (;;); + } + ppc_md = **mach; + + DBG("Found, Initializing memory management...\n"); + + /* + * Initialize stab / SLB management + */ + if (!firmware_has_feature(FW_FEATURE_ISERIES)) + stab_initialize(lpaca->stab_real); + + /* + * Initialize the MMU Hash table and create the linear mapping + * of memory + */ + htab_initialize(); + + DBG(" <- early_setup()\n"); +} + + +/* + * Initialize some remaining members of the ppc64_caches and systemcfg structures + * (at least until we get rid of them completely). This is mostly some + * cache informations about the CPU that will be used by cache flush + * routines and/or provided to userland + */ +static void __init initialize_cache_info(void) +{ + struct device_node *np; + unsigned long num_cpus = 0; + + DBG(" -> initialize_cache_info()\n"); + + for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) { + num_cpus += 1; + + /* We're assuming *all* of the CPUs have the same + * d-cache and i-cache sizes... -Peter + */ + + if ( num_cpus == 1 ) { + u32 *sizep, *lsizep; + u32 size, lsize; + const char *dc, *ic; + + /* Then read cache informations */ + if (systemcfg->platform == PLATFORM_POWERMAC) { + dc = "d-cache-block-size"; + ic = "i-cache-block-size"; + } else { + dc = "d-cache-line-size"; + ic = "i-cache-line-size"; + } + + size = 0; + lsize = cur_cpu_spec->dcache_bsize; + sizep = (u32 *)get_property(np, "d-cache-size", NULL); + if (sizep != NULL) + size = *sizep; + lsizep = (u32 *) get_property(np, dc, NULL); + if (lsizep != NULL) + lsize = *lsizep; + if (sizep == 0 || lsizep == 0) + DBG("Argh, can't find dcache properties ! " + "sizep: %p, lsizep: %p\n", sizep, lsizep); + + systemcfg->dcache_size = ppc64_caches.dsize = size; + systemcfg->dcache_line_size = + ppc64_caches.dline_size = lsize; + ppc64_caches.log_dline_size = __ilog2(lsize); + ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; + + size = 0; + lsize = cur_cpu_spec->icache_bsize; + sizep = (u32 *)get_property(np, "i-cache-size", NULL); + if (sizep != NULL) + size = *sizep; + lsizep = (u32 *)get_property(np, ic, NULL); + if (lsizep != NULL) + lsize = *lsizep; + if (sizep == 0 || lsizep == 0) + DBG("Argh, can't find icache properties ! 
" + "sizep: %p, lsizep: %p\n", sizep, lsizep); + + systemcfg->icache_size = ppc64_caches.isize = size; + systemcfg->icache_line_size = + ppc64_caches.iline_size = lsize; + ppc64_caches.log_iline_size = __ilog2(lsize); + ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; + } + } + + /* Add an eye catcher and the systemcfg layout version number */ + strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); + systemcfg->version.major = SYSTEMCFG_MAJOR; + systemcfg->version.minor = SYSTEMCFG_MINOR; + systemcfg->processor = mfspr(SPRN_PVR); + + DBG(" <- initialize_cache_info()\n"); +} + +static void __init check_for_initrd(void) +{ +#ifdef CONFIG_BLK_DEV_INITRD + u64 *prop; + + DBG(" -> check_for_initrd()\n"); + + if (of_chosen) { + prop = (u64 *)get_property(of_chosen, + "linux,initrd-start", NULL); + if (prop != NULL) { + initrd_start = (unsigned long)__va(*prop); + prop = (u64 *)get_property(of_chosen, + "linux,initrd-end", NULL); + if (prop != NULL) { + initrd_end = (unsigned long)__va(*prop); + initrd_below_start_ok = 1; + } else + initrd_start = 0; + } + } + + /* If we were passed an initrd, set the ROOT_DEV properly if the values + * look sensible. If not, clear initrd reference. + */ + if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE && + initrd_end > initrd_start) + ROOT_DEV = Root_RAM0; + else + initrd_start = initrd_end = 0; + + if (initrd_start) + printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); + + DBG(" <- check_for_initrd()\n"); +#endif /* CONFIG_BLK_DEV_INITRD */ +} + +/* + * Do some initial setup of the system. The parameters are those which + * were passed in from the bootloader. + */ +void __init setup_system(void) +{ + DBG(" -> setup_system()\n"); + + /* + * Unflatten the device-tree passed by prom_init or kexec + */ + unflatten_device_tree(); + + /* + * Fill the ppc64_caches & systemcfg structures with informations + * retreived from the device-tree. Need to be called before + * finish_device_tree() since the later requires some of the + * informations filled up here to properly parse the interrupt + * tree. + * It also sets up the cache line sizes which allows to call + * routines like flush_icache_range (used by the hash init + * later on). + */ + initialize_cache_info(); + +#ifdef CONFIG_PPC_RTAS + /* + * Initialize RTAS if available + */ + rtas_initialize(); +#endif /* CONFIG_PPC_RTAS */ + printk("%s:%d rtas.dev=%p (@ %p)\n", __FILE__, __LINE__, rtas.dev, + &rtas.dev); + + /* + * Check if we have an initrd provided via the device-tree + */ + check_for_initrd(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + /* + * Do some platform specific early initializations, that includes + * setting up the hash table pointers. 
It also sets up some interrupt-mapping + * related options that will be used by finish_device_tree() + */ + ppc_md.init_early(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + /* + * "Finish" the device-tree, that is do the actual parsing of + * some of the properties like the interrupt map + */ + finish_device_tree(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + /* + * Initialize xmon + */ +#ifdef CONFIG_XMON_DEFAULT + xmon_init(1); +#endif + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + /* + * Register early console + */ + register_early_udbg_console(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + /* Save unparsed command line copy for /proc/cmdline */ + strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); + + parse_early_param(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + +#ifdef CONFIG_SMP + /* + * iSeries has already initialized the cpu maps at this point. + */ + setup_cpu_maps(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + /* Release secondary cpus out of their spinloops at 0x60 now that + * we can map physical -> logical CPU ids + */ + smp_release_cpus(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); +#endif + + printk("Starting Linux PPC64 %s\n", system_utsname.version); + + printk("-----------------------------------------------------\n"); + printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); + printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch); + printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller); + printk("systemcfg = 0x%p\n", systemcfg); + printk("systemcfg->platform = 0x%x\n", systemcfg->platform); + printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount); + printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize); + printk("ppc64_caches.dcache_line_size = 0x%x\n", + ppc64_caches.dline_size); + printk("ppc64_caches.icache_line_size = 0x%x\n", + ppc64_caches.iline_size); + printk("htab_address = 0x%p\n", htab_address); + printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); + printk("-----------------------------------------------------\n"); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + mm_init_ppc64(); + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + DBG(" <- setup_system()\n"); +} + +/* also used by kexec */ +void machine_shutdown(void) +{ + if (ppc_md.nvram_sync) + ppc_md.nvram_sync(); +} + +void machine_restart(char *cmd) +{ + machine_shutdown(); + ppc_md.restart(cmd); +#ifdef CONFIG_SMP + smp_send_stop(); +#endif + printk(KERN_EMERG "System Halted, OK to turn off power\n"); + local_irq_disable(); + while (1) ; +} + +void machine_power_off(void) +{ + machine_shutdown(); + ppc_md.power_off(); +#ifdef CONFIG_SMP + smp_send_stop(); +#endif + printk(KERN_EMERG "System Halted, OK to turn off power\n"); + local_irq_disable(); + while (1) ; +} +/* Used by the G5 thermal driver */ +EXPORT_SYMBOL_GPL(machine_power_off); + +void machine_halt(void) +{ + machine_shutdown(); + ppc_md.halt(); +#ifdef CONFIG_SMP + smp_send_stop(); +#endif + printk(KERN_EMERG "System Halted, OK to turn off power\n"); + local_irq_disable(); + while (1) ; +} + +static int ppc64_panic_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + ppc_md.panic((char *)ptr); /* May not return */ + return NOTIFY_DONE; +} + + +#ifdef CONFIG_SMP +DEFINE_PER_CPU(unsigned int, pvr); +#endif + +static int show_cpuinfo(struct seq_file *m, void *v) +{ + 
unsigned long cpu_id = (unsigned long)v - 1; + unsigned int pvr; + unsigned short maj; + unsigned short min; + + if (cpu_id == NR_CPUS) { + seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); + + if (ppc_md.get_cpuinfo != NULL) + ppc_md.get_cpuinfo(m); + + return 0; + } + + /* We only show online cpus: disable preempt (overzealous, I + * knew) to prevent cpu going down. */ + preempt_disable(); + if (!cpu_online(cpu_id)) { + preempt_enable(); + return 0; + } + +#ifdef CONFIG_SMP + pvr = per_cpu(pvr, cpu_id); +#else + pvr = mfspr(SPRN_PVR); +#endif + maj = (pvr >> 8) & 0xFF; + min = pvr & 0xFF; + + seq_printf(m, "processor\t: %lu\n", cpu_id); + seq_printf(m, "cpu\t\t: "); + + if (cur_cpu_spec->pvr_mask) + seq_printf(m, "%s", cur_cpu_spec->cpu_name); + else + seq_printf(m, "unknown (%08x)", pvr); + +#ifdef CONFIG_ALTIVEC + if (cpu_has_feature(CPU_FTR_ALTIVEC)) + seq_printf(m, ", altivec supported"); +#endif /* CONFIG_ALTIVEC */ + + seq_printf(m, "\n"); + + /* + * Assume here that all clock rates are the same in a + * smp system. -- Cort + */ + seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000, + ppc_proc_freq % 1000000); + + seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min); + + preempt_enable(); + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL; +} +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} +static void c_stop(struct seq_file *m, void *v) +{ +} +struct seq_operations cpuinfo_op = { + .start =c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +/* + * These three variables are used to save values passed to us by prom_init() + * via the device tree. The TCE variables are needed because with a memory_limit + * in force we may need to explicitly map the TCE are at the top of RAM. + */ +unsigned long memory_limit; +unsigned long tce_alloc_start; +unsigned long tce_alloc_end; + +#ifdef CONFIG_PPC_ISERIES +/* + * On iSeries we just parse the mem=X option from the command line. + * On pSeries it's a bit more complicated, see prom_init_mem() + */ +static int __init early_parsemem(char *p) +{ + if (!p) + return 0; + + memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE); + + return 0; +} +early_param("mem", early_parsemem); +#endif /* CONFIG_PPC_ISERIES */ + +#ifdef CONFIG_PPC_MULTIPLATFORM +static int __init set_preferred_console(void) +{ + struct device_node *prom_stdout = NULL; + char *name; + u32 *spd; + int offset = 0; + + DBG(" -> set_preferred_console()\n"); + + /* The user has requested a console so this is already set up. */ + if (strstr(saved_command_line, "console=")) { + DBG(" console was specified !\n"); + return -EBUSY; + } + + if (!of_chosen) { + DBG(" of_chosen is NULL !\n"); + return -ENODEV; + } + /* We are getting a weird phandle from OF ... */ + /* ... 
So use the full path instead */ + name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); + if (name == NULL) { + DBG(" no linux,stdout-path !\n"); + return -ENODEV; + } + prom_stdout = of_find_node_by_path(name); + if (!prom_stdout) { + DBG(" can't find stdout package %s !\n", name); + return -ENODEV; + } + DBG("stdout is %s\n", prom_stdout->full_name); + + name = (char *)get_property(prom_stdout, "name", NULL); + if (!name) { + DBG(" stdout package has no name !\n"); + goto not_found; + } + spd = (u32 *)get_property(prom_stdout, "current-speed", NULL); + + if (0) + ; +#ifdef CONFIG_SERIAL_8250_CONSOLE + else if (strcmp(name, "serial") == 0) { + int i; + u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i); + if (i > 8) { + switch (reg[1]) { + case 0x3f8: + offset = 0; + break; + case 0x2f8: + offset = 1; + break; + case 0x898: + offset = 2; + break; + case 0x890: + offset = 3; + break; + default: + /* We dont recognise the serial port */ + goto not_found; + } + } + } +#endif /* CONFIG_SERIAL_8250_CONSOLE */ +#ifdef CONFIG_PPC_PSERIES + else if (strcmp(name, "vty") == 0) { + u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL); + char *compat = (char *)get_property(prom_stdout, "compatible", NULL); + + if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) { + /* Host Virtual Serial Interface */ + int offset; + switch (reg[0]) { + case 0x30000000: + offset = 0; + break; + case 0x30000001: + offset = 1; + break; + default: + goto not_found; + } + of_node_put(prom_stdout); + DBG("Found hvsi console at offset %d\n", offset); + return add_preferred_console("hvsi", offset, NULL); + } else { + /* pSeries LPAR virtual console */ + of_node_put(prom_stdout); + DBG("Found hvc console\n"); + return add_preferred_console("hvc", 0, NULL); + } + } +#endif /* CONFIG_PPC_PSERIES */ +#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE + else if (strcmp(name, "ch-a") == 0) + offset = 0; + else if (strcmp(name, "ch-b") == 0) + offset = 1; +#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ + else + goto not_found; + of_node_put(prom_stdout); + + DBG("Found serial console at ttyS%d\n", offset); + + if (spd) { + static char __initdata opt[16]; + sprintf(opt, "%d", *spd); + return add_preferred_console("ttyS", offset, opt); + } else + return add_preferred_console("ttyS", offset, NULL); + + not_found: + DBG("No preferred console found !\n"); + of_node_put(prom_stdout); + return -ENODEV; +} +console_initcall(set_preferred_console); +#endif /* CONFIG_PPC_MULTIPLATFORM */ + +#ifdef CONFIG_IRQSTACKS +static void __init irqstack_early_init(void) +{ + unsigned int i; + + /* + * interrupt stacks must be under 256MB, we cannot afford to take + * SLB misses on them. + */ + for_each_cpu(i) { + softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE, + THREAD_SIZE, 0x10000000)); + hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE, + THREAD_SIZE, 0x10000000)); + } +} +#else +#define irqstack_early_init() +#endif + +/* + * Stack space used when we detect a bad kernel stack pointer, and + * early in SMP boots before relocation is enabled. + */ +static void __init emergency_stack_init(void) +{ + unsigned long limit; + unsigned int i; + + /* + * Emergency stacks must be under 256MB, we cannot afford to take + * SLB misses on them. The ABI also requires them to be 128-byte + * aligned. + * + * Since we use these as temporary stacks during secondary CPU + * bringup, we need to get at them in real mode. This means they + * must also be within the RMO region. 
+ */ + limit = min(0x10000000UL, lmb.rmo_size); + + for_each_cpu(i) + paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128, + limit)) + PAGE_SIZE; +} + +/* + * Called from setup_arch to initialize the bitmap of available + * syscalls in the systemcfg page + */ +void __init setup_syscall_map(void) +{ + unsigned int i, count64 = 0, count32 = 0; + extern unsigned long *sys_call_table; + extern unsigned long sys_ni_syscall; + + + for (i = 0; i < __NR_syscalls; i++) { + if (sys_call_table[i*2] != sys_ni_syscall) { + count64++; + systemcfg->syscall_map_64[i >> 5] |= + 0x80000000UL >> (i & 0x1f); + } + if (sys_call_table[i*2+1] != sys_ni_syscall) { + count32++; + systemcfg->syscall_map_32[i >> 5] |= + 0x80000000UL >> (i & 0x1f); + } + } + printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n", + count32, count64); +} + +/* + * Called into from start_kernel, after lock_kernel has been called. + * Initializes bootmem, which is unsed to manage page allocation until + * mem_init is called. + */ +void __init setup_arch(char **cmdline_p) +{ + extern void do_init_bootmem(void); + + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + ppc64_boot_msg(0x12, "Setup Arch"); + + *cmdline_p = cmd_line; + + /* + * Set cache line size based on type of cpu as a default. + * Systems with OF can look in the properties on the cpu node(s) + * for a possibly more accurate value. + */ + dcache_bsize = ppc64_caches.dline_size; + icache_bsize = ppc64_caches.iline_size; + + /* reboot on panic */ + panic_timeout = 180; + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + + if (ppc_md.panic) + notifier_chain_register(&panic_notifier_list, &ppc64_panic_block); + + init_mm.start_code = PAGE_OFFSET; + init_mm.end_code = (unsigned long) _etext; + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = klimit; + + irqstack_early_init(); + emergency_stack_init(); + + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + stabs_alloc(); + + /* set up the bootmem stuff with available memory */ + do_init_bootmem(); + sparse_init(); + + printk("%s:%d rtas.dev=%p\n", __FILE__, __LINE__, rtas.dev); + /* initialize the syscall map in systemcfg */ + setup_syscall_map(); + + ppc_md.setup_arch(); + + /* Use the default idle loop if the platform hasn't provided one. */ + if (NULL == ppc_md.idle_loop) { + ppc_md.idle_loop = default_idle; + printk(KERN_INFO "Using default idle loop\n"); + } + + paging_init(); + ppc64_boot_msg(0x15, "Setup Done"); +} + + +/* ToDo: do something useful if ppc_md is not yet setup. */ +#define PPC64_LINUX_FUNCTION 0x0f000000 +#define PPC64_IPL_MESSAGE 0xc0000000 +#define PPC64_TERM_MESSAGE 0xb0000000 + +static void ppc64_do_msg(unsigned int src, const char *msg) +{ + if (ppc_md.progress) { + char buf[128]; + + sprintf(buf, "%08X\n", src); + ppc_md.progress(buf, 0); + snprintf(buf, 128, "%s", msg); + ppc_md.progress(buf, 0); + } +} + +/* Print a boot progress message. 
*/ +void ppc64_boot_msg(unsigned int src, const char *msg) +{ + ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); + printk("[boot]%04x %s\n", src, msg); +} + +/* Print a termination message (print only -- does not stop the kernel) */ +void ppc64_terminate_msg(unsigned int src, const char *msg) +{ + ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg); + printk("[terminate]%04x %s\n", src, msg); +} + +/* This should only be called on processor 0 during calibrate decr */ +void __init setup_default_decr(void) +{ + struct paca_struct *lpaca = get_paca(); + + lpaca->default_decr = tb_ticks_per_jiffy; + lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy; +} + +#ifndef CONFIG_PPC_ISERIES +/* + * This function can be used by platforms to "find" legacy serial ports. + * It works for "serial" nodes under an "isa" node, and will try to + * respect the "ibm,aix-loc" property if any. It works with up to 8 + * ports. + */ + +#define MAX_LEGACY_SERIAL_PORTS 8 +static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1]; +static unsigned int old_serial_count; + +void __init generic_find_legacy_serial_ports(u64 *physport, + unsigned int *default_speed) +{ + struct device_node *np; + u32 *sizeprop; + + struct isa_reg_property { + u32 space; + u32 address; + u32 size; + }; + struct pci_reg_property { + struct pci_address addr; + u32 size_hi; + u32 size_lo; + }; + + DBG(" -> generic_find_legacy_serial_port()\n"); + + *physport = 0; + if (default_speed) + *default_speed = 0; + + np = of_find_node_by_path("/"); + if (!np) + return; + + /* First fill our array */ + for (np = NULL; (np = of_find_node_by_type(np, "serial"));) { + struct device_node *isa, *pci; + struct isa_reg_property *reg; + unsigned long phys_size, addr_size, io_base; + u32 *rangesp; + u32 *interrupts, *clk, *spd; + char *typep; + int index, rlen, rentsize; + + /* Ok, first check if it's under an "isa" parent */ + isa = of_get_parent(np); + if (!isa || strcmp(isa->name, "isa")) { + DBG("%s: no isa parent found\n", np->full_name); + continue; + } + + /* Now look for an "ibm,aix-loc" property that gives us ordering + * if any... + */ + typep = (char *)get_property(np, "ibm,aix-loc", NULL); + + /* Get the ISA port number */ + reg = (struct isa_reg_property *)get_property(np, "reg", NULL); + if (reg == NULL) + goto next_port; + /* We assume the interrupt number isn't translated ... */ + interrupts = (u32 *)get_property(np, "interrupts", NULL); + /* get clock freq. 
if present */ + clk = (u32 *)get_property(np, "clock-frequency", NULL); + /* get default speed if present */ + spd = (u32 *)get_property(np, "current-speed", NULL); + /* Default to locate at end of array */ + index = old_serial_count; /* end of the array by default */ + + /* If we have a location index, then use it */ + if (typep && *typep == 'S') { + index = simple_strtol(typep+1, NULL, 0) - 1; + /* if index is out of range, use end of array instead */ + if (index >= MAX_LEGACY_SERIAL_PORTS) + index = old_serial_count; + /* if our index is still out of range, that mean that + * array is full, we could scan for a free slot but that + * make little sense to bother, just skip the port + */ + if (index >= MAX_LEGACY_SERIAL_PORTS) + goto next_port; + if (index >= old_serial_count) + old_serial_count = index + 1; + /* Check if there is a port who already claimed our slot */ + if (serial_ports[index].iobase != 0) { + /* if we still have some room, move it, else override */ + if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) { + DBG("Moved legacy port %d -> %d\n", index, + old_serial_count); + serial_ports[old_serial_count++] = + serial_ports[index]; + } else { + DBG("Replacing legacy port %d\n", index); + } + } + } + if (index >= MAX_LEGACY_SERIAL_PORTS) + goto next_port; + if (index >= old_serial_count) + old_serial_count = index + 1; + + /* Now fill the entry */ + memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port)); + serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16; + serial_ports[index].iobase = reg->address; + serial_ports[index].irq = interrupts ? interrupts[0] : 0; + serial_ports[index].flags = ASYNC_BOOT_AUTOCONF; + + DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n", + index, + serial_ports[index].iobase, + serial_ports[index].irq, + serial_ports[index].uartclk); + + /* Get phys address of IO reg for port 1 */ + if (index != 0) + goto next_port; + + pci = of_get_parent(isa); + if (!pci) { + DBG("%s: no pci parent found\n", np->full_name); + goto next_port; + } + + rangesp = (u32 *)get_property(pci, "ranges", &rlen); + if (rangesp == NULL) { + of_node_put(pci); + goto next_port; + } + rlen /= 4; + + /* we need the #size-cells of the PCI bridge node itself */ + phys_size = 1; + sizeprop = (u32 *)get_property(pci, "#size-cells", NULL); + if (sizeprop != NULL) + phys_size = *sizeprop; + /* we need the parent #addr-cells */ + addr_size = prom_n_addr_cells(pci); + rentsize = 3 + addr_size + phys_size; + io_base = 0; + for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) { + if (((rangesp[0] >> 24) & 0x3) != 1) + continue; /* not IO space */ + io_base = rangesp[3]; + if (addr_size == 2) + io_base = (io_base << 32) | rangesp[4]; + } + if (io_base != 0) { + *physport = io_base + reg->address; + if (default_speed && spd) + *default_speed = *spd; + } + of_node_put(pci); + next_port: + of_node_put(isa); + } + + DBG(" <- generic_find_legacy_serial_port()\n"); +} + +static struct platform_device serial_device = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev = { + .platform_data = serial_ports, + }, +}; + +static int __init serial_dev_init(void) +{ + return platform_device_register(&serial_device); +} +arch_initcall(serial_dev_init); + +#endif /* CONFIG_PPC_ISERIES */ + +int check_legacy_ioport(unsigned long base_port) +{ + if (ppc_md.check_legacy_ioport == NULL) + return 0; + return ppc_md.check_legacy_ioport(base_port); +} +EXPORT_SYMBOL(check_legacy_ioport); + +#ifdef CONFIG_XMON +static int __init early_xmon(char *p) +{ + /* ensure 
xmon is enabled */ + if (p) { + if (strncmp(p, "on", 2) == 0) + xmon_init(1); + if (strncmp(p, "off", 3) == 0) + xmon_init(0); + if (strncmp(p, "early", 5) != 0) + return 0; + } + xmon_init(1); + debugger(NULL); + + return 0; +} +early_param("xmon", early_xmon); +#endif + +void cpu_die(void) +{ + if (ppc_md.cpu_die) + ppc_md.cpu_die(); +} diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index a8cedb96de5..30367a0237d 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -7,3 +7,7 @@ obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o memcpy_64.o \ usercopy_64.o sstep.o checksum_64.o mem_64.o obj-$(CONFIG_PPC_ISERIES) += e2a.o +ifeq ($(CONFIG_PPC64),y) +obj-$(CONFIG_SMP) += locks.o +endif + diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c new file mode 100644 index 00000000000..4b8c5ad5e7d --- /dev/null +++ b/arch/powerpc/lib/locks.c @@ -0,0 +1,95 @@ +/* + * Spin and read/write lock operations. + * + * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM + * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM + * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM + * Rework to support virtual processors + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <linux/stringify.h> + +/* waiting for a spinlock... */ +#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) +#include <asm/hvcall.h> +#include <asm/iSeries/HvCall.h> + +void __spin_yield(raw_spinlock_t *lock) +{ + unsigned int lock_value, holder_cpu, yield_count; + struct paca_struct *holder_paca; + + lock_value = lock->slock; + if (lock_value == 0) + return; + holder_cpu = lock_value & 0xffff; + BUG_ON(holder_cpu >= NR_CPUS); + holder_paca = &paca[holder_cpu]; + yield_count = holder_paca->lppaca.yield_count; + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + if (lock->slock != lock_value) + return; /* something has changed */ +#ifdef CONFIG_PPC_ISERIES + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); +#else + plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), + yield_count); +#endif +} + +/* + * Waiting for a read lock or a write lock on a rwlock... + * This turns out to be the same for read and write locks, since + * we only know the holder if it is write-locked. 
+ */ +void __rw_yield(raw_rwlock_t *rw) +{ + int lock_value; + unsigned int holder_cpu, yield_count; + struct paca_struct *holder_paca; + + lock_value = rw->lock; + if (lock_value >= 0) + return; /* no write lock at present */ + holder_cpu = lock_value & 0xffff; + BUG_ON(holder_cpu >= NR_CPUS); + holder_paca = &paca[holder_cpu]; + yield_count = holder_paca->lppaca.yield_count; + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + if (rw->lock != lock_value) + return; /* something has changed */ +#ifdef CONFIG_PPC_ISERIES + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); +#else + plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), + yield_count); +#endif +} +#endif + +void __raw_spin_unlock_wait(raw_spinlock_t *lock) +{ + while (lock->slock) { + HMT_low(); + if (SHARED_PROCESSOR) + __spin_yield(lock); + } + HMT_medium(); +} + +EXPORT_SYMBOL(__raw_spin_unlock_wait); |
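For context, __spin_yield() and __rw_yield() above are only called from a lock acquisition slowpath: when a virtual processor sees that the holder's yield_count is odd (meaning the holding virtual CPU is currently preempted by the hypervisor), it confers its remaining timeslice to the holder rather than spinning uselessly. Below is a minimal sketch of such a slowpath, not the verbatim header code; the __spin_trylock(), HMT_low()/HMT_medium() and SHARED_PROCESSOR helpers are assumed from the contemporary ppc64 spinlock headers.

```c
/* Simplified, illustrative sketch of a shared-processor-aware
 * spin_lock() slowpath; helper names are assumed from the
 * asm-ppc64 headers of this era, not defined by this patch. */
static inline void example_spin_lock(raw_spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;				/* got the lock */
		do {
			HMT_low();			/* drop SMT thread priority while spinning */
			if (SHARED_PROCESSOR)		/* partition shares physical CPUs */
				__spin_yield(lock);	/* confer cycles to the preempted holder */
		} while (unlikely(lock->slock != 0));
		HMT_medium();				/* restore priority before retrying */
	}
}
```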