Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/Kconfig                |  11
-rw-r--r--  arch/ppc64/Kconfig.debug          |   4
-rw-r--r--  arch/ppc64/boot/main.c            |  11
-rw-r--r--  arch/ppc64/kernel/idle.c          |   1
-rw-r--r--  arch/ppc64/kernel/kprobes.c       | 138
-rw-r--r--  arch/ppc64/kernel/lparcfg.c       |   4
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c |   1
-rw-r--r--  arch/ppc64/kernel/misc.S          |  72
-rw-r--r--  arch/ppc64/kernel/pci.c           |  17
-rw-r--r--  arch/ppc64/kernel/prom.c          |  25
-rw-r--r--  arch/ppc64/kernel/prom_init.c     |   3
-rw-r--r--  arch/ppc64/kernel/rtas_pci.c      |   6
-rw-r--r--  arch/ppc64/kernel/scanlog.c       |   3
-rw-r--r--  arch/ppc64/kernel/sysfs.c         |   1
-rw-r--r--  arch/ppc64/kernel/udbg.c          |  55
15 files changed, 200 insertions, 152 deletions
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 2130cc31595..29552348e58 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -56,6 +56,7 @@ config PPC_STD_MMU
 # max order + 1
 config FORCE_MAX_ZONEORDER
 	int
+	default "9" if PPC_64K_PAGES
 	default "13"
 
 source "init/Kconfig"
@@ -173,6 +174,16 @@ config KEXEC
 	  support. As of this writing the exact hardware interface is
 	  strongly in flux, so no good recommendation can be made.
 
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ_PMAC64
+	bool "Support for some Apple G5s"
+	depends on CPU_FREQ && PMAC_SMU && PPC64
+	select CPU_FREQ_TABLE
+	help
+	  This adds support for frequency switching on Apple iMac G5,
+	  and some of the more recent desktop G5 machines as well.
+
 config IBMVIO
 	depends on PPC_PSERIES || PPC_ISERIES
 	bool
diff --git a/arch/ppc64/Kconfig.debug b/arch/ppc64/Kconfig.debug
index f16a5030527..b258c9314a1 100644
--- a/arch/ppc64/Kconfig.debug
+++ b/arch/ppc64/Kconfig.debug
@@ -55,10 +55,6 @@ config XMON_DEFAULT
 	  xmon is normally disabled unless booted with 'xmon=on'.
 	  Use 'xmon=off' to disable xmon init during runtime.
 
-config PPCDBG
-	bool "Include PPCDBG realtime debugging"
-	depends on DEBUG_KERNEL
-
 config IRQSTACKS
 	bool "Use separate kernel stacks when processing interrupts"
 	help
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index c1dc876bcca..e0dde24a72c 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -203,8 +203,15 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
 		if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
 			break;
 	}
-	vmlinux.size = (unsigned long)elf64ph->p_filesz;
-	vmlinux.memsize = (unsigned long)elf64ph->p_memsz;
+	vmlinux.size = (unsigned long)elf64ph->p_filesz +
+		(unsigned long)elf64ph->p_offset;
+	/* We need to claim the memsize plus the file offset since gzip
+	 * will expand the header (file offset), then the kernel, then
+	 * possible rubbish we don't care about. But the kernel bss must
+	 * be claimed (it will be zero'd by the kernel itself)
+	 */
+	vmlinux.memsize = (unsigned long)elf64ph->p_memsz +
+		(unsigned long)elf64ph->p_offset;
 	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
 	vmlinux.addr = try_claim(vmlinux.memsize);
 	if (vmlinux.addr == 0) {
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 8abd2ad9283..8fec2746980 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -28,6 +28,7 @@
 #include <asm/time.h>
 #include <asm/systemcfg.h>
 #include <asm/machdep.h>
+#include <asm/smp.h>
 
 extern void power4_idle(void);
 
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index ed876a5178a..511af54e623 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -30,19 +30,14 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/sstep.h>
 
 static DECLARE_MUTEX(kprobe_mutex);
-
-static struct kprobe *current_kprobe;
-static unsigned long kprobe_status, kprobe_saved_msr;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
-static struct pt_regs jprobe_saved_regs;
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
@@ -108,20 +103,28 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->nip = (unsigned long)p->ainsn.insn;
 }
 
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
+}
+
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	kprobe_prev = current_kprobe;
-	kprobe_status_prev = kprobe_status;
-	kprobe_saved_msr_prev = kprobe_saved_msr;
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
 
-static inline void restore_previous_kprobe(void)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+				struct kprobe_ctlblk *kcb)
 {
-	current_kprobe = kprobe_prev;
-	kprobe_status = kprobe_status_prev;
-	kprobe_saved_msr = kprobe_saved_msr_prev;
+	__get_cpu_var(current_kprobe) = p;
+	kcb->kprobe_saved_msr = regs->msr;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
@@ -145,19 +148,24 @@ static inline int kprobe_handler(struct pt_regs *regs)
 	struct kprobe *p;
 	int ret = 0;
 	unsigned int *addr = (unsigned int *)regs->nip;
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
 			kprobe_opcode_t insn = *p->ainsn.insn;
-			if (kprobe_status == KPROBE_HIT_SS &&
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
 					is_trap(insn)) {
 				regs->msr &= ~MSR_SE;
-				regs->msr |= kprobe_saved_msr;
-				unlock_kprobes();
+				regs->msr |= kcb->kprobe_saved_msr;
 				goto no_kprobe;
 			}
 			/* We have reentered the kprobe_handler(), since
@@ -166,27 +174,24 @@ static inline int kprobe_handler(struct pt_regs *regs)
 			 * just single step on the instruction of the new probe
 			 * without calling any user handlers.
 			 */
-			save_previous_kprobe();
-			current_kprobe = p;
-			kprobe_saved_msr = regs->msr;
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kcb->kprobe_saved_msr = regs->msr;
 			p->nmissed++;
 			prepare_singlestep(p, regs);
-			kprobe_status = KPROBE_REENTER;
+			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
 		} else {
-			p = current_kprobe;
+			p = __get_cpu_var(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
 				goto ss_probe;
 			}
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}
 
-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * PowerPC has multiple variants of the "trap"
@@ -209,24 +214,19 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
-	kprobe_saved_msr = regs->msr;
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/* handler has already set things up, so skip ss setup */
 		return 1;
 
 ss_probe:
 	prepare_singlestep(p, regs);
-	kprobe_status = KPROBE_HIT_SS;
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler().
-	 */
-	preempt_disable();
+	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
@@ -251,9 +251,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 
 	/*
@@ -292,12 +293,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
 	regs->nip = orig_ret_address;
 
-	unlock_kprobes();
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption.
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
@@ -323,23 +326,26 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 
 static inline int post_kprobe_handler(struct pt_regs *regs)
 {
-	if (!kprobe_running())
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
 		return 0;
 
-	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
-		kprobe_status = KPROBE_HIT_SSDONE;
-		current_kprobe->post_handler(current_kprobe, regs, 0);
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
 	}
 
-	resume_execution(current_kprobe, regs);
-	regs->msr |= kprobe_saved_msr;
+	resume_execution(cur, regs);
+	regs->msr |= kcb->kprobe_saved_msr;
 
 	/*Restore back the original saved kprobes variables and continue. */
-	if (kprobe_status == KPROBE_REENTER) {
-		restore_previous_kprobe();
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
 		goto out;
 	}
-	unlock_kprobes();
+	reset_current_kprobe();
 
 out:
 	preempt_enable_no_resched();
@@ -354,19 +360,20 @@ out:
 	return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-	if (current_kprobe->fault_handler
-	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
 		return 1;
 
-	if (kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(current_kprobe, regs);
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs);
 		regs->msr &= ~MSR_SE;
-		regs->msr |= kprobe_saved_msr;
+		regs->msr |= kcb->kprobe_saved_msr;
 
-		unlock_kprobes();
+		reset_current_kprobe();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -381,11 +388,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	/*
-	 * Interrupts are not disabled here. We need to disable
-	 * preemption, because kprobe_running() uses smp_processor_id().
-	 */
-	preempt_disable();
 	switch (val) {
 	case DIE_BPT:
 		if (kprobe_handler(args->regs))
@@ -396,22 +398,25 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 			ret = NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	preempt_enable_no_resched();
 	return ret;
 }
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
 	/* setup return addr to the jprobe handler routine */
 	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
@@ -431,12 +436,15 @@ void __kprobes jprobe_return_end(void)
 
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
 	/*
 	 * FIXME - we should ideally be validating that we got here 'cos
 	 * of the "trap" in jprobe_return() above, before restoring the
 	 * saved regs...
 	 */
-	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	preempt_enable_no_resched();
 	return 1;
 }
 
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c
index e86155770bb..3e7b2f28ec8 100644
--- a/arch/ppc64/kernel/lparcfg.c
+++ b/arch/ppc64/kernel/lparcfg.c
@@ -599,9 +599,7 @@ int __init lparcfg_init(void)
 void __exit lparcfg_cleanup(void)
 {
 	if (proc_ppc64_lparcfg) {
-		if (proc_ppc64_lparcfg->data) {
-			kfree(proc_ppc64_lparcfg->data);
-		}
+		kfree(proc_ppc64_lparcfg->data);
 		remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
 	}
 }
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index ff8679f260f..07ea03598c0 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -24,6 +24,7 @@
 #include <asm/mmu.h>
 #include <asm/sections.h>	/* _end */
 #include <asm/prom.h>
+#include <asm/smp.h>
 
 #define HASH_GROUP_SIZE 0x80	/* size of each hash group, asm/mmu.h */
 
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 077507ffbab..914632ec587 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -560,7 +560,7 @@ _GLOBAL(real_readb)
 	isync
 	blr
 
-	/*
+/*
  * Do an IO access in real mode
  */
 _GLOBAL(real_writeb)
@@ -593,6 +593,76 @@ _GLOBAL(real_writeb)
 #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
 
 /*
+ * SCOM access functions for 970 (FX only for now)
+ *
+ * unsigned long scom970_read(unsigned int address);
+ * void scom970_write(unsigned int address, unsigned long value);
+ *
+ * The address passed in is the 24 bits register address. This code
+ * is 970 specific and will not check the status bits, so you should
+ * know what you are doing.
+ */
+_GLOBAL(scom970_read)
+	/* interrupts off */
+	mfmsr	r4
+	ori	r0,r4,MSR_EE
+	xori	r0,r0,MSR_EE
+	mtmsrd	r0,1
+
+	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+	 * (including parity). On current CPUs they must be 0'd,
+	 * and finally or in RW bit
+	 */
+	rlwinm	r3,r3,8,0,15
+	ori	r3,r3,0x8000
+
+	/* do the actual scom read */
+	sync
+	mtspr	SPRN_SCOMC,r3
+	isync
+	mfspr	r3,SPRN_SCOMD
+	isync
+	mfspr	r0,SPRN_SCOMC
+	isync
+
+	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
+	 * that's the best we can do). Not implemented yet as we don't use
+	 * the scom on any of the bogus CPUs yet, but may have to be done
+	 * ultimately
+	 */
+
+	/* restore interrupts */
+	mtmsrd	r4,1
+	blr
+
+
+_GLOBAL(scom970_write)
+	/* interrupts off */
+	mfmsr	r5
+	ori	r0,r5,MSR_EE
+	xori	r0,r0,MSR_EE
+	mtmsrd	r0,1
+
+	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+	 * (including parity). On current CPUs they must be 0'd.
+	 */
+
+	rlwinm	r3,r3,8,0,15
+
+	sync
+	mtspr	SPRN_SCOMD,r4		/* write data */
+	isync
+	mtspr	SPRN_SCOMC,r3		/* write command */
+	isync
+	mfspr	3,SPRN_SCOMC
+	isync
+
+	/* restore interrupts */
+	mtmsrd	r5,1
+	blr
+
+
+/*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
  */
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index 3d2106b022a..30247ff7497 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -295,8 +295,8 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
 	}
 }
 
-static struct pci_dev *of_create_pci_dev(struct device_node *node,
-					 struct pci_bus *bus, int devfn)
+struct pci_dev *of_create_pci_dev(struct device_node *node,
+				 struct pci_bus *bus, int devfn)
 {
 	struct pci_dev *dev;
 	const char *type;
@@ -354,10 +354,9 @@ static struct pci_dev *of_create_pci_dev(struct device_node *node,
 
 	return dev;
 }
+EXPORT_SYMBOL(of_create_pci_dev);
 
-static void of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev);
-
-static void __devinit of_scan_bus(struct device_node *node,
+void __devinit of_scan_bus(struct device_node *node,
 				  struct pci_bus *bus)
 {
 	struct device_node *child = NULL;
@@ -381,9 +380,10 @@ static void __devinit of_scan_bus(struct device_node *node,
 
 	do_bus_setup(bus);
 }
+EXPORT_SYMBOL(of_scan_bus);
 
-static void __devinit of_scan_pci_bridge(struct device_node *node,
-					 struct pci_dev *dev)
+void __devinit of_scan_pci_bridge(struct device_node *node,
+			struct pci_dev *dev)
 {
 	struct pci_bus *bus;
 	u32 *busrange, *ranges;
@@ -464,9 +464,10 @@ static void __devinit of_scan_pci_bridge(struct device_node *node,
 	else if (mode == PCI_PROBE_NORMAL)
 		pci_scan_child_bus(bus);
 }
+EXPORT_SYMBOL(of_scan_pci_bridge);
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
-static void __devinit scan_phb(struct pci_controller *hose)
+void __devinit scan_phb(struct pci_controller *hose)
 {
 	struct pci_bus *bus;
 	struct device_node *node = hose->arch_data;
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index dece31e58bc..3402fbee62c 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/bitops.h>
 #include <linux/module.h>
+#include <linux/module.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -46,7 +47,6 @@
 #include <asm/pgtable.h>
 #include <asm/pci.h>
 #include <asm/iommu.h>
-#include <asm/ppcdebug.h>
 #include <asm/btext.h>
 #include <asm/sections.h>
 #include <asm/machdep.h>
@@ -1866,17 +1866,32 @@ get_property(struct device_node *np, const char *name, int *lenp)
 EXPORT_SYMBOL(get_property);
 
 /*
- * Add a property to a node
+ * Add a property to a node.
  */
-void
+int
 prom_add_property(struct device_node* np, struct property* prop)
 {
-	struct property **next = &np->properties;
+	struct property **next;
 
 	prop->next = NULL;
-	while (*next)
+	write_lock(&devtree_lock);
+	next = &np->properties;
+	while (*next) {
+		if (strcmp(prop->name, (*next)->name) == 0) {
+			/* duplicate ! don't insert it */
+			write_unlock(&devtree_lock);
+			return -1;
+		}
 		next = &(*next)->next;
+	}
 	*next = prop;
+	write_unlock(&devtree_lock);
+
+	/* try to add to proc as well if it was initialized */
+	if (np->pde)
+		proc_device_tree_add_prop(np->pde, prop);
+
+	return 0;
 }
 
 #if 0
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index a4bbca6dbb8..e4c880dab99 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -44,7 +44,6 @@
 #include <asm/pgtable.h>
 #include <asm/pci.h>
 #include <asm/iommu.h>
-#include <asm/ppcdebug.h>
 #include <asm/btext.h>
 #include <asm/sections.h>
 #include <asm/machdep.h>
@@ -1825,7 +1824,7 @@ static void __init fixup_device_tree(void)
 	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
 	    == PROM_ERROR)
 		return;
-	if (u3_rev != 0x35 && u3_rev != 0x37)
+	if (u3_rev < 0x35 || u3_rev > 0x39)
 		return;
 	/* does it need fixup ? */
 	if (prom_getproplen(i2c, "interrupts") > 0)
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 3ad15c90fbb..3c3f19192fc 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -440,7 +440,6 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
 	struct device_node *root = of_find_node_by_path("/");
 	unsigned int root_size_cells = 0;
 	struct pci_controller *phb;
-	struct pci_bus *bus;
 	int primary;
 
 	root_size_cells = prom_n_size_cells(root);
@@ -456,10 +455,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
 	of_node_put(root);
 	pci_devs_phb_init_dynamic(phb);
 
-	phb->last_busno = 0xff;
-	bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data);
-	phb->bus = bus;
-	phb->last_busno = bus->subordinate;
+	scan_phb(phb);
 
 	return phb;
 }
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/ppc64/kernel/scanlog.c
index 215bf890030..2edc947f7c4 100644
--- a/arch/ppc64/kernel/scanlog.c
+++ b/arch/ppc64/kernel/scanlog.c
@@ -225,8 +225,7 @@ int __init scanlog_init(void)
 void __exit scanlog_cleanup(void)
 {
 	if (proc_ppc64_scan_log_dump) {
-		if (proc_ppc64_scan_log_dump->data)
-			kfree(proc_ppc64_scan_log_dump->data);
+		kfree(proc_ppc64_scan_log_dump->data);
 		remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent);
 	}
 }
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index 6654b350979..e99ec62c2c5 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -20,6 +20,7 @@
 #include <asm/paca.h>
 #include <asm/lppaca.h>
 #include <asm/machdep.h>
+#include <asm/smp.h>
 
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
diff --git a/arch/ppc64/kernel/udbg.c b/arch/ppc64/kernel/udbg.c
index d49c3613c8e..0d878e72fc4 100644
--- a/arch/ppc64/kernel/udbg.c
+++ b/arch/ppc64/kernel/udbg.c
@@ -10,12 +10,10 @@
  */
 
 #include <stdarg.h>
-#define WANT_PPCDBG_TAB /* Only defined here */
 #include <linux/config.h>
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/console.h>
-#include <asm/ppcdebug.h>
 #include <asm/processor.h>
 
 void (*udbg_putc)(unsigned char c);
@@ -89,59 +87,6 @@ void udbg_printf(const char *fmt, ...)
 	va_end(args);
 }
 
-/* PPCDBG stuff */
-
-u64 ppc64_debug_switch;
-
-/* Special print used by PPCDBG() macro */
-void udbg_ppcdbg(unsigned long debug_flags, const char *fmt, ...)
-{
-	unsigned long active_debugs = debug_flags & ppc64_debug_switch;
-
-	if (active_debugs) {
-		va_list ap;
-		unsigned char buf[UDBG_BUFSIZE];
-		unsigned long i, len = 0;
-
-		for (i=0; i < PPCDBG_NUM_FLAGS; i++) {
-			if (((1U << i) & active_debugs) &&
-			    trace_names[i]) {
-				len += strlen(trace_names[i]);
-				udbg_puts(trace_names[i]);
-				break;
-			}
-		}
-
-		snprintf(buf, UDBG_BUFSIZE, " [%s]: ", current->comm);
-		len += strlen(buf);
-		udbg_puts(buf);
-
-		while (len < 18) {
-			udbg_puts(" ");
-			len++;
-		}
-
-		va_start(ap, fmt);
-		vsnprintf(buf, UDBG_BUFSIZE, fmt, ap);
-		udbg_puts(buf);
-		va_end(ap);
-	}
-}
-
-unsigned long udbg_ifdebug(unsigned long flags)
-{
-	return (flags & ppc64_debug_switch);
-}
-
-/*
- * Initialize the PPCDBG state. Called before relocation has been enabled.
- */
-void __init ppcdbg_initialize(void)
-{
-	ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */
-	/* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
-}
-
 /*
  * Early boot console based on udbg
  */
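Editor's note, not part of the patch: a minimal caller-side sketch of the reworked prom_add_property() contract shown above (it now takes devtree_lock, rejects a duplicate property name with -1, mirrors the property into /proc when the node has a pde, and returns 0 on success). The node path, property name/value and init function are made-up placeholders, and the sketch assumes the matching prototype change lands in include/asm-ppc64/prom.h elsewhere in this series.

/* Hypothetical illustration only -- not from this commit. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/prom.h>

static unsigned char example_value[] = "ok";

static struct property example_prop = {
	.name   = "linux,example-flag",		/* made-up property name */
	.length = sizeof(example_value),
	.value  = example_value,
};

static int __init add_example_property(void)
{
	struct device_node *np = of_find_node_by_path("/chosen");

	if (np == NULL)
		return -ENODEV;

	/* prom_add_property() now reports a duplicate name instead of
	 * silently chaining a second property onto the node. */
	if (prom_add_property(np, &example_prop) != 0)
		printk(KERN_INFO "example: property already present\n");

	of_node_put(np);
	return 0;
}
__initcall(add_example_property);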
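Likewise not part of the patch: a sketch of how a 970-only caller might use the two SCOM helpers whose C prototypes are documented in the misc.S comment above. The SCOM register address is a made-up placeholder, and, as the comment warns, the helpers do not check status bits, so the caller is responsible for knowing it is on a 970.

/* Hypothetical illustration only -- not from this commit. */
#include <linux/kernel.h>

/* Prototypes as documented in the scom970_* comment in misc.S. */
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);

#define EXAMPLE_SCOM_ADDR	0x123456	/* made-up 24-bit SCOM address */

static void scom970_example(void)
{
	unsigned long val;

	/* The helpers only mask interrupts around the access; any
	 * serialization against other SCOM users is up to the caller. */
	val = scom970_read(EXAMPLE_SCOM_ADDR);
	scom970_write(EXAMPLE_SCOM_ADDR, val);

	printk(KERN_DEBUG "SCOM 0x%x reads back 0x%lx\n",
	       EXAMPLE_SCOM_ADDR, val);
}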