Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/Makefile                            |  3
-rw-r--r--   arch/powerpc/kernel/btext.c                      |  4
-rw-r--r--   arch/powerpc/kernel/entry_32.S                   |  6
-rw-r--r--   arch/powerpc/kernel/rtas.c                       | 99
-rw-r--r--   arch/powerpc/kernel/time.c                       |  2
-rw-r--r--   arch/powerpc/mm/init_64.c                        | 16
-rw-r--r--   arch/powerpc/mm/stab.c                           |  1
-rw-r--r--   arch/powerpc/mm/tlb_64.c                         |  2
-rw-r--r--   arch/powerpc/platforms/embedded6xx/prpmc2800.c   |  1
-rw-r--r--   arch/powerpc/platforms/pseries/Kconfig           |  2
-rw-r--r--   arch/powerpc/sysdev/cpm_common.c                 |  4
11 files changed, 79 insertions, 61 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 4e165342210..bd87626c1f6 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,6 +107,9 @@ endif
# No AltiVec instruction when building kernel
KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
+# No SPE instruction when building kernel
+KBUILD_CFLAGS += $(call cc-option,-mno-spe)
+
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 3ef51fb6f10..9c74fdf29ee 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -186,7 +186,9 @@ int btext_initialize(struct device_node *np)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
- prop = of_get_property(np, "address", NULL);
+ prop = of_get_property(np, "linux,bootx-addr", NULL);
+ if (prop == NULL)
+ prop = of_get_property(np, "address", NULL);
if (prop)
address = *prop;
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index a7572cf464b..69a91bd4611 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -251,6 +251,9 @@ syscall_exit_cont:
bne- 2f
1:
#endif /* CONFIG_44x */
+BEGIN_FTR_SECTION
+ lwarx r7,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
@@ -717,6 +720,9 @@ restore:
mtctr r11
PPC405_ERR77(0,r1)
+BEGIN_FTR_SECTION
+ lwarx r11,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 21478079828..52e95c2158c 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,9 @@
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+#include <linux/cpumask.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -34,6 +37,8 @@
#include <asm/lmb.h>
#include <asm/udbg.h>
#include <asm/syscalls.h>
+#include <asm/smp.h>
+#include <asm/atomic.h>
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
@@ -41,8 +46,10 @@ struct rtas_t rtas = {
EXPORT_SYMBOL(rtas);
struct rtas_suspend_me_data {
- long waiting;
- struct rtas_args *args;
+ atomic_t working; /* number of cpus accessing this struct */
+ int token; /* ibm,suspend-me */
+ int error;
+ struct completion *complete; /* wait on this until working == 0 */
};
DEFINE_SPINLOCK(rtas_data_buf_lock);
@@ -657,50 +664,62 @@ static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
static void rtas_percpu_suspend_me(void *info)
{
- int i;
long rc;
- long flags;
+ unsigned long msr_save;
+ int cpu;
struct rtas_suspend_me_data *data =
(struct rtas_suspend_me_data *)info;
- /*
- * We use "waiting" to indicate our state. As long
- * as it is >0, we are still trying to all join up.
- * If it goes to 0, we have successfully joined up and
- * one thread got H_CONTINUE. If any error happens,
- * we set it to <0.
- */
- local_irq_save(flags);
- do {
- rc = plpar_hcall_norets(H_JOIN);
- smp_rmb();
- } while (rc == H_SUCCESS && data->waiting > 0);
- if (rc == H_SUCCESS)
- goto out;
+ atomic_inc(&data->working);
+
+ /* really need to ensure MSR.EE is off for H_JOIN */
+ msr_save = mfmsr();
+ mtmsr(msr_save & ~(MSR_EE));
+
+ rc = plpar_hcall_norets(H_JOIN);
+
+ mtmsr(msr_save);
- if (rc == H_CONTINUE) {
- data->waiting = 0;
- data->args->args[data->args->nargs] =
- rtas_call(ibm_suspend_me_token, 0, 1, NULL);
- for_each_possible_cpu(i)
- plpar_hcall_norets(H_PROD,i);
+ if (rc == H_SUCCESS) {
+ /* This cpu was prodded and the suspend is complete. */
+ goto out;
+ } else if (rc == H_CONTINUE) {
+ /* All other cpus are in H_JOIN, this cpu does
+ * the suspend.
+ */
+ printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
+ smp_processor_id());
+ data->error = rtas_call(data->token, 0, 1, NULL);
+
+ if (data->error)
+ printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
+ data->error);
} else {
- data->waiting = -EBUSY;
- printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
+ printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
+ smp_processor_id(), rc);
+ data->error = rc;
}
-
+ /* This cpu did the suspend or got an error; in either case,
+ * we need to prod all other cpus out of join state.
+ * Extra prods are harmless.
+ */
+ for_each_online_cpu(cpu)
+ plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
out:
- local_irq_restore(flags);
- return;
+ if (atomic_dec_return(&data->working) == 0)
+ complete(data->complete);
}
static int rtas_ibm_suspend_me(struct rtas_args *args)
{
- int i;
long state;
long rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
struct rtas_suspend_me_data data;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ if (!rtas_service_present("ibm,suspend-me"))
+ return -ENOSYS;
/* Make sure the state is valid */
rc = plpar_hcall(H_VASI_STATE, retbuf,
@@ -721,25 +740,23 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
return 0;
}
- data.waiting = 1;
- data.args = args;
+ atomic_set(&data.working, 0);
+ data.token = rtas_token("ibm,suspend-me");
+ data.error = 0;
+ data.complete = &done;
/* Call function on all CPUs. One of us will make the
* rtas call
*/
if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
- data.waiting = -EINVAL;
+ data.error = -EINVAL;
- if (data.waiting != 0)
- printk(KERN_ERR "Error doing global join\n");
+ wait_for_completion(&done);
- /* Prod each CPU. This won't hurt, and will wake
- * anyone we successfully put to sleep with H_JOIN.
- */
- for_each_possible_cpu(i)
- plpar_hcall_norets(H_PROD, i);
+ if (data.error != 0)
+ printk(KERN_ERR "Error doing global join\n");
- return data.waiting;
+ return data.error;
}
#else /* CONFIG_PPC_PSERIES */
static int rtas_ibm_suspend_me(struct rtas_args *args)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 4beb6329dfb..c0d77723ba1 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -829,7 +829,7 @@ static void register_decrementer_clockevent(int cpu)
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of_cpu(cpu);
- printk(KERN_INFO "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
+ printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
clockevents_register_device(dec);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d9c82d3d648..c0f5cff7703 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,8 +19,6 @@
*
*/
-#undef DEBUG
-
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -66,12 +64,6 @@
#include "mmu_decl.h"
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
@@ -175,8 +167,8 @@ void pgtable_cache_init(void)
int size = pgtable_cache_size[i];
const char *name = pgtable_cache_name[i];
- DBG("Allocating page table cache %s (#%d) "
- "for size: %08x...\n", name, i, size);
+ pr_debug("Allocating page table cache %s (#%d) "
+ "for size: %08x...\n", name, i, size);
pgtable_cache[i] = kmem_cache_create(name,
size, size,
SLAB_PANIC,
@@ -239,8 +231,8 @@ int __meminit vmemmap_populate(struct page *start_page,
if (!p)
return -ENOMEM;
- printk(KERN_WARNING "vmemmap %08lx allocated at %p, "
- "physical %08lx.\n", start, p, __pa(p));
+ pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
+ start, p, __pa(p));
mapped = htab_bolt_mapping(start, start + page_size,
__pa(p), mode_rw, mmu_linear_psize,
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 9e85bda7621..50448d5de9d 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,6 +20,7 @@
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>
+#include <asm/iseries/hv_call.h>
struct stab_entry {
unsigned long esid_data;
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index eafbca52bff..e2d867ce1c7 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -54,12 +54,10 @@ unsigned long pte_freelist_forced_free;
((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
/ sizeof(pgtable_free_t))
-#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
/* Do nothing, just ensure we sync with all CPUs */
}
-#endif
/* This is only called when we are critically out of memory
* (and fail to get a page in pte_free_tlb).
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index e484cac7509..653a5eb91c9 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -144,6 +144,7 @@ static int __init prpmc2800_probe(void)
strncpy(prpmc2800_platform_name, m,
min((int)len, PLATFORM_NAME_MAX - 1));
+ _set_L2CR(_get_L2CR() | L2CR_L2E);
return 1;
}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 16e4e401b82..306a9d07491 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -21,7 +21,7 @@ config PPC_SPLPAR
config EEH
bool "PCI Extended Error Handling (EEH)" if EMBEDDED
- depends on PPC_PSERIES
+ depends on PPC_PSERIES && PCI
default y if !EMBEDDED
config SCANLOG
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 66c8ad4cfce..165981c8778 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -77,8 +77,6 @@ int __init cpm_muram_init(void)
int i = 0;
int ret = 0;
- printk("cpm_muram_init\n");
-
spin_lock_init(&cpm_muram_lock);
/* initialize the info header */
rh_init(&cpm_muram_info, 1,
@@ -193,7 +191,7 @@ void __iomem *cpm_muram_addr(unsigned long offset)
EXPORT_SYMBOL(cpm_muram_addr);
/**
- * cpm_muram_phys - turn a muram virtual address into a DMA address
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
* @offset: virtual address from cpm_muram_addr() to convert
*/
dma_addr_t cpm_muram_dma(void __iomem *addr)