 arch/powerpc/kernel/module.c                    |   6
 arch/powerpc/kernel/process.c                   |  83
 arch/powerpc/kernel/setup_32.c                  |   4
 arch/powerpc/kernel/setup_64.c                  |   2
 arch/powerpc/kernel/signal.h                    |  10
 arch/powerpc/kernel/signal_32.c                 | 109
 arch/powerpc/kernel/signal_64.c                 |  43
 arch/powerpc/kernel/vdso.c                      |  10
 arch/powerpc/kernel/vdso32/vdso32.lds.S         |   3
 arch/powerpc/kernel/vdso64/vdso64.lds.S         |   3
 arch/powerpc/kernel/vmlinux.lds.S               |   6
 arch/powerpc/lib/feature-fixups-test.S          |  21
 arch/powerpc/lib/feature-fixups.c               |  36
 arch/powerpc/mm/mem.c                           |   1
 arch/powerpc/mm/numa.c                          | 310
 arch/powerpc/platforms/pseries/hotplug-memory.c | 117
 arch/powerpc/platforms/pseries/reconfig.c       |  38
 include/asm-powerpc/code-patching.h             |   3
 include/asm-powerpc/cputable.h                  |  21
 include/asm-powerpc/elf.h                       |  20
 include/asm-powerpc/feature-fixups.h            |  10
 include/asm-powerpc/pSeries_reconfig.h          |   6
 include/asm-powerpc/processor.h                 |   2
 include/asm-powerpc/sparsemem.h                 |   4
 include/asm-powerpc/synch.h                     |  38
 25 files changed, 602 insertions(+), 304 deletions(-)
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 40dd52d81c1..af07003573c 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -86,6 +86,12 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr + sect->sh_size);
#endif
+ sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+ if (sect != NULL)
+ do_lwsync_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
return 0;
}
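
Aside: module_finalize() above locates the module's own __lwsync_fixup table by section name before patching it. A minimal sketch of what a find_section() helper of this shape does, assuming the usual ELF section-header walk (the helper itself is not part of this diff, and its exact signature is inferred from the call site):

#include <linux/elf.h>
#include <linux/string.h>

/* Sketch only: match a section by name in the section-header string
 * table. Signature assumed from the module_finalize() call site. */
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s;
	const char *secstrs = (const char *)hdr
		+ sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++)
		if (strcmp(secstrs + s->sh_name, name) == 0)
			return s;
	return NULL;
}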
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1924b57bd24..85e557300d8 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -105,29 +105,6 @@ void enable_kernel_fp(void)
}
EXPORT_SYMBOL(enable_kernel_fp);
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-#ifdef CONFIG_VSX
- int i;
- elf_fpreg_t *reg;
-#endif
-
- if (!tsk->thread.regs)
- return 0;
- flush_fp_to_thread(current);
-
-#ifdef CONFIG_VSX
- reg = (elf_fpreg_t *)fpregs;
- for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
- *reg = tsk->thread.TS_FPR(i);
- memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
-#else
- memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
-#endif
-
- return 1;
-}
-
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
@@ -161,35 +138,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
-
-int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
-{
- /* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
- * separately, see below */
- const int nregs = ELF_NVRREG - 2;
- elf_vrreg_t *reg;
- u32 *dest;
-
- if (tsk == current)
- flush_altivec_to_thread(tsk);
-
- reg = (elf_vrreg_t *)vrregs;
-
- /* copy the 32 vr registers */
- memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
- reg += nregs;
-
- /* copy the vscr */
- memcpy(reg, &tsk->thread.vscr, sizeof(*reg));
- reg++;
-
- /* vrsave is stored in the high 32bit slot of the final 128bits */
- memset(reg, 0, sizeof(*reg));
- dest = (u32 *)reg;
- *dest = tsk->thread.vrsave;
-
- return 1;
-}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -224,29 +172,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
-
-/*
- * This dumps the lower half 64bits of the first 32 VSX registers.
- * This needs to be called with dump_task_fp and dump_task_altivec to
- * get all the VSX state.
- */
-int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
-{
- elf_vrreg_t *reg;
- double buf[32];
- int i;
-
- if (tsk == current)
- flush_vsx_to_thread(tsk);
-
- reg = (elf_vrreg_t *)vrregs;
-
- for (i = 0; i < 32 ; i++)
- buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
- memcpy(reg, buf, sizeof(buf));
-
- return 1;
-}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -279,14 +204,6 @@ void flush_spe_to_thread(struct task_struct *tsk)
preempt_enable();
}
}
-
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
-{
- flush_spe_to_thread(current);
- /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
- memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
- return 1;
-}
#endif /* CONFIG_SPE */
#ifndef CONFIG_SMP
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9e83add5429..0109e7f0ccf 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -101,6 +101,10 @@ unsigned long __init early_init(unsigned long dt_ptr)
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
+ do_lwsync_fixups(spec->cpu_features,
+ PTRRELOC(&__start___lwsync_fixup),
+ PTRRELOC(&__stop___lwsync_fixup));
+
return KERNELBASE + offset;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 098fd96a394..04d8de9f0fc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -363,6 +363,8 @@ void __init setup_system(void)
&__start___ftr_fixup, &__stop___ftr_fixup);
do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+ do_lwsync_fixups(cur_cpu_spec->cpu_features,
+ &__start___lwsync_fixup, &__stop___lwsync_fixup);
/*
* Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 77efb3d5465..28f4b9f5fe5 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -24,6 +24,16 @@ extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset,
struct pt_regs *regs);
+extern unsigned long copy_fpr_to_user(void __user *to,
+ struct task_struct *task);
+extern unsigned long copy_fpr_from_user(struct task_struct *task,
+ void __user *from);
+#ifdef CONFIG_VSX
+extern unsigned long copy_vsx_to_user(void __user *to,
+ struct task_struct *task);
+extern unsigned long copy_vsx_from_user(struct task_struct *task,
+ void __user *from);
+#endif
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 349d3487d92..9991e2a58bf 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -328,6 +328,75 @@ struct rt_sigframe {
int abigap[56];
};
+#ifdef CONFIG_VSX
+unsigned long copy_fpr_to_user(void __user *to,
+ struct task_struct *task)
+{
+ double buf[ELF_NFPREG];
+ int i;
+
+	/* copy FPRs and the FPSCR into a local buffer, then write that out to user space */
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ buf[i] = task->thread.TS_FPR(i);
+ memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+ return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+}
+
+unsigned long copy_fpr_from_user(struct task_struct *task,
+ void __user *from)
+{
+ double buf[ELF_NFPREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ task->thread.TS_FPR(i) = buf[i];
+ memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+
+ return 0;
+}
+
+unsigned long copy_vsx_to_user(void __user *to,
+ struct task_struct *task)
+{
+ double buf[ELF_NVSRHALFREG];
+ int i;
+
+	/* copy the VSR low doublewords into a local buffer, then write that out to user space */
+ for (i = 0; i < ELF_NVSRHALFREG; i++)
+ buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+ return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
+}
+
+unsigned long copy_vsx_from_user(struct task_struct *task,
+ void __user *from)
+{
+ double buf[ELF_NVSRHALFREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < ELF_NVSRHALFREG ; i++)
+ task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ return 0;
+}
+#else
+inline unsigned long copy_fpr_to_user(void __user *to,
+ struct task_struct *task)
+{
+ return __copy_to_user(to, task->thread.fpr,
+ ELF_NFPREG * sizeof(double));
+}
+
+inline unsigned long copy_fpr_from_user(struct task_struct *task,
+ void __user *from)
+{
+ return __copy_from_user(task->thread.fpr, from,
+ ELF_NFPREG * sizeof(double));
+}
+#endif
+
/*
* Save the current user registers on the user stack.
* We only save the altivec/spe registers if the process has used
@@ -337,10 +406,6 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
int sigret)
{
unsigned long msr = regs->msr;
-#ifdef CONFIG_VSX
- double buf[32];
- int i;
-#endif
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
@@ -370,14 +435,9 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
- /* save FPR copy to local buffer then write to the thread_struct */
- flush_fp_to_thread(current);
- for (i = 0; i < 32 ; i++)
- buf[i] = current->thread.TS_FPR(i);
- memcpy(&buf[i], &current->thread.fpscr, sizeof(double));
- if (__copy_to_user(&frame->mc_fregs, buf, ELF_NFPREG * sizeof(double)))
+ if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
+#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
@@ -386,18 +446,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
*/
if (current->thread.used_vsr) {
flush_vsx_to_thread(current);
- for (i = 0; i < 32 ; i++)
- buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
- if (__copy_to_user(&frame->mc_vsregs, buf,
- ELF_NVSRHALFREG * sizeof(double)))
+ if (copy_vsx_to_user(&frame->mc_vsregs, current))
return 1;
msr |= MSR_VSX;
}
-#else
- /* save floating-point registers */
- if (__copy_to_user(&frame->mc_fregs, current->thread.fpr,
- ELF_NFPREG * sizeof(double)))
- return 1;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* save spe registers */
@@ -442,7 +494,6 @@ static long restore_user_regs(struct pt_regs *regs,
unsigned int save_r2 = 0;
unsigned long msr;
#ifdef CONFIG_VSX
- double buf[32];
int i;
#endif
@@ -490,13 +541,10 @@ static long restore_user_regs(struct pt_regs *regs,
if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
return 1;
#endif /* CONFIG_ALTIVEC */
+ if (copy_fpr_from_user(current, &sr->mc_fregs))
+ return 1;
#ifdef CONFIG_VSX
- if (__copy_from_user(buf, &sr->mc_fregs,sizeof(sr->mc_fregs)))
- return 1;
- for (i = 0; i < 32 ; i++)
- current->thread.TS_FPR(i) = buf[i];
- memcpy(&current->thread.fpscr, &buf[i], sizeof(double));
/*
* Force the process to reload the VSX registers from
* current->thread when it next does VSX instruction.
@@ -507,18 +555,11 @@ static long restore_user_regs(struct pt_regs *regs,
* Restore altivec registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
- if (__copy_from_user(buf, &sr->mc_vsregs,
- sizeof(sr->mc_vsregs)))
+ if (copy_vsx_from_user(current, &sr->mc_vsregs))
return 1;
- for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-#else
- if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
- sizeof(sr->mc_fregs)))
- return 1;
#endif /* CONFIG_VSX */
/*
* force the process to reload the FP registers from
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 8214e57aab6..93ebfb6944b 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -89,10 +89,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
#endif
unsigned long msr = regs->msr;
long err = 0;
-#ifdef CONFIG_VSX
- double buf[FP_REGS_SIZE];
- int i;
-#endif
flush_fp_to_thread(current);
@@ -117,12 +113,9 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
flush_fp_to_thread(current);
+ /* copy fpr regs and fpscr */
+ err |= copy_fpr_to_user(&sc->fp_regs, current);
#ifdef CONFIG_VSX
- /* Copy FP to local buffer then write that out */
- for (i = 0; i < 32 ; i++)
- buf[i] = current->thread.TS_FPR(i);
- memcpy(&buf[i], &current->thread.fpscr, sizeof(double));
- err |= __copy_to_user(&sc->fp_regs, buf, FP_REGS_SIZE);
/*
* Copy VSX low doubleword to local buffer for formatting,
* then out to userspace. Update v_regs to point after the
@@ -131,17 +124,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
if (current->thread.used_vsr) {
flush_vsx_to_thread(current);
v_regs += ELF_NVRREG;
- for (i = 0; i < 32 ; i++)
- buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
- err |= __copy_to_user(v_regs, buf, 32 * sizeof(double));
+ err |= copy_vsx_to_user(v_regs, current);
/* set MSR_VSX in the MSR value in the frame to
	 * indicate that sc->vs_regs contains valid data.
*/
msr |= MSR_VSX;
}
-#else /* CONFIG_VSX */
- /* copy fpr regs and fpscr */
- err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
#endif /* CONFIG_VSX */
err |= __put_user(&sc->gp_regs, &sc->regs);
WARN_ON(!FULL_REGS(regs));
@@ -165,13 +153,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs;
#endif
-#ifdef CONFIG_VSX
- double buf[FP_REGS_SIZE];
- int i;
-#endif
unsigned long err = 0;
unsigned long save_r13 = 0;
unsigned long msr;
+#ifdef CONFIG_VSX
+ int i;
+#endif
/* If this is not a signal return, we preserve the TLS in r13 */
if (!sig)
@@ -234,15 +221,9 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
else
current->thread.vrsave = 0;
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
/* restore floating point */
- err |= __copy_from_user(buf, &sc->fp_regs, FP_REGS_SIZE);
- if (err)
- return err;
- for (i = 0; i < 32 ; i++)
- current->thread.TS_FPR(i) = buf[i];
- memcpy(&current->thread.fpscr, &buf[i], sizeof(double));
-
+ err |= copy_fpr_from_user(current, &sc->fp_regs);
+#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
* VMX data. Copy VSX low doubleword from userspace to local
@@ -250,14 +231,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
*/
v_regs += ELF_NVRREG;
if ((msr & MSR_VSX) != 0)
- err |= __copy_from_user(buf, v_regs, 32 * sizeof(double));
+ err |= copy_vsx_from_user(current, v_regs);
else
- memset(buf, 0, 32 * sizeof(double));
+ for (i = 0; i < 32 ; i++)
+ current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
- for (i = 0; i < 32 ; i++)
- current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
#else
- err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
#endif
return err;
}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ce245a850db..f177c60ea76 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -571,6 +571,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
if (start64)
do_feature_fixups(powerpc_firmware_features,
start64, start64 + size64);
+
+ start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
+ if (start64)
+ do_lwsync_fixups(cur_cpu_spec->cpu_features,
+ start64, start64 + size64);
#endif /* CONFIG_PPC64 */
start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
@@ -585,6 +590,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
start32, start32 + size32);
#endif /* CONFIG_PPC64 */
+ start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
+ if (start32)
+ do_lwsync_fixups(cur_cpu_spec->cpu_features,
+ start32, start32 + size32);
+
return 0;
}
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 271793577cd..be3b6a41dc0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -33,6 +33,9 @@ SECTIONS
. = ALIGN(8);
__ftr_fixup : { *(__ftr_fixup) }
+ . = ALIGN(8);
+ __lwsync_fixup : { *(__lwsync_fixup) }
+
#ifdef CONFIG_PPC64
. = ALIGN(8);
__fw_ftr_fixup : { *(__fw_ftr_fixup) }
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e608d1bd3bf..d0b2526dd38 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -35,6 +35,9 @@ SECTIONS
__ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
+ __lwsync_fixup : { *(__lwsync_fixup) }
+
+ . = ALIGN(8);
__fw_ftr_fixup : { *(__fw_ftr_fixup) }
/*
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 3c07811989f..6856f6c1572 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -127,6 +127,12 @@ SECTIONS
*(__ftr_fixup)
__stop___ftr_fixup = .;
}
+ . = ALIGN(8);
+ __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
+ __start___lwsync_fixup = .;
+ *(__lwsync_fixup)
+ __stop___lwsync_fixup = .;
+ }
#ifdef CONFIG_PPC64
. = ALIGN(8);
__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index 10d038b501a..cb737484c5a 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -10,6 +10,7 @@
#include <asm/feature-fixups.h>
#include <asm/ppc_asm.h>
+#include <asm/synch.h>
.text
@@ -139,14 +140,14 @@ globl(ftr_fixup_test6)
1: or 1,1,1
BEGIN_FTR_SECTION
or 5,5,5
-2: cmpdi r3,0
+2: PPC_LCMPI r3,0
beq 4f
blt 2b
b 1b
b 4f
FTR_SECTION_ELSE
2: or 2,2,2
- cmpdi r3,1
+ PPC_LCMPI r3,1
beq 3f
blt 2b
b 3f
@@ -161,7 +162,7 @@ globl(end_ftr_fixup_test6)
globl(ftr_fixup_test6_expected)
1: or 1,1,1
2: or 2,2,2
- cmpdi r3,1
+ PPC_LCMPI r3,1
beq 3f
blt 2b
b 3f
@@ -725,3 +726,17 @@ MAKE_MACRO_TEST_EXPECTED(FTR);
MAKE_MACRO_TEST(FW_FTR);
MAKE_MACRO_TEST_EXPECTED(FW_FTR);
#endif
+
+globl(lwsync_fixup_test)
+1: or 1,1,1
+ LWSYNC
+globl(end_lwsync_fixup_test)
+
+globl(lwsync_fixup_test_expected_LWSYNC)
+1: or 1,1,1
+ lwsync
+
+globl(lwsync_fixup_test_expected_SYNC)
+1: or 1,1,1
+ sync
+
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 48e1ed89052..4e43702b981 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -110,6 +110,22 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
+void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+ unsigned int *start, *end, *dest;
+
+ if (!(value & CPU_FTR_LWSYNC))
+		return;
+
+ start = fixup_start;
+ end = fixup_end;
+
+ for (; start < end; start++) {
+ dest = (void *)start + *start;
+ patch_instruction(dest, PPC_LWSYNC_INSTR);
+ }
+}
+
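
The table is position-independent: each __lwsync_fixup entry is a single 32-bit word holding the distance from the entry itself to the instruction to patch (label##1b - label##3b in the feature-fixups.h macros later in this diff), which is why dest = (void *)start + *start recovers the instruction address wherever the table lands. A stand-alone toy demonstrating the same self-relative walk (user-space sketch, not kernel code; a plain store stands in for patch_instruction()):

#include <stdio.h>
#include <stdint.h>

#define TOY_LWSYNC 0x7c2004acu		/* lwsync opcode, as in PPC_LWSYNC_INSTR */

static uint32_t text[4] = { 1, 2, 3, 4 };	/* pretend instruction stream */

/* one self-relative entry: offset from the entry to text[2] */
static int32_t fixup_table[1];

static void toy_lwsync_fixups(int32_t *start, int32_t *end)
{
	uint32_t *dest;

	for (; start < end; start++) {
		/* entry address + stored offset = instruction address */
		dest = (uint32_t *)((char *)start + *start);
		*dest = TOY_LWSYNC;	/* patch_instruction() in the kernel */
	}
}

int main(void)
{
	fixup_table[0] = (char *)&text[2] - (char *)&fixup_table[0];
	toy_lwsync_fixups(fixup_table, fixup_table + 1);
	printf("text[2] = %#x\n", text[2]);	/* prints 0x7c2004ac */
	return 0;
}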
#ifdef CONFIG_FTR_FIXUP_SELFTEST
#define check(x) \
@@ -295,6 +311,25 @@ static void test_fw_macros(void)
#endif
}
+static void test_lwsync_macros(void)
+{
+ extern void lwsync_fixup_test;
+ extern void end_lwsync_fixup_test;
+ extern void lwsync_fixup_test_expected_LWSYNC;
+ extern void lwsync_fixup_test_expected_SYNC;
+ unsigned long size = &end_lwsync_fixup_test -
+ &lwsync_fixup_test;
+
+ /* The fixups have already been done for us during boot */
+ if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
+ check(memcmp(&lwsync_fixup_test,
+ &lwsync_fixup_test_expected_LWSYNC, size) == 0);
+ } else {
+ check(memcmp(&lwsync_fixup_test,
+ &lwsync_fixup_test_expected_SYNC, size) == 0);
+ }
+}
+
static int __init test_feature_fixups(void)
{
printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
@@ -307,6 +342,7 @@ static int __init test_feature_fixups(void)
test_alternative_case_with_external_branch();
test_cpu_macros();
test_fw_macros();
+ test_lwsync_macros();
return 0;
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 51f82d83bf1..776ba6ad5e1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -44,6 +44,7 @@
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
+#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index dc704da363e..cf4bffba6f7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -268,6 +268,144 @@ static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
return result;
}
+struct of_drconf_cell {
+ u64 base_addr;
+ u32 drc_index;
+ u32 reserved;
+ u32 aa_index;
+ u32 flags;
+};
+
+#define DRCONF_MEM_ASSIGNED 0x00000008
+#define DRCONF_MEM_AI_INVALID 0x00000040
+#define DRCONF_MEM_RESERVED 0x00000080
+
+/*
+ * Read the next lmb list entry from the ibm,dynamic-memory property
+ * and return the information in the provided of_drconf_cell structure.
+ */
+static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
+{
+ const u32 *cp;
+
+ drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
+
+ cp = *cellp;
+ drmem->drc_index = cp[0];
+ drmem->reserved = cp[1];
+ drmem->aa_index = cp[2];
+ drmem->flags = cp[3];
+
+ *cellp = cp + 4;
+}
+
+/*
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
+ *
+ * The layout of the ibm,dynamic-memory property is a count N of lmb
+ * list entries followed by the N entries themselves. Each lmb list entry
+ * contains information as laid out in the of_drconf_cell struct above.
+ */
+static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
+{
+ const u32 *prop;
+ u32 len, entries;
+
+ prop = of_get_property(memory, "ibm,dynamic-memory", &len);
+ if (!prop || len < sizeof(unsigned int))
+ return 0;
+
+ entries = *prop++;
+
+ /* Now that we know the number of entries, revalidate the size
+ * of the property read in to ensure we have everything
+ */
+ if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
+ return 0;
+
+ *dm = prop;
+ return entries;
+}
+
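
So the property is one u32 count followed by packed entries of n_mem_addr_cells address cells plus four u32 cells each. A stand-alone sketch of walking a buffer with that layout (host-endian toy data and hypothetical names; the real property is big-endian and the address is read via read_n_cells()):

#include <stdio.h>
#include <stdint.h>

#define N_MEM_ADDR_CELLS 2	/* assumption: 64-bit addresses, two cells each */

struct toy_drconf_cell {
	uint64_t base_addr;
	uint32_t drc_index, reserved, aa_index, flags;
};

/* count = 1, then base_addr (2 cells), drc_index, reserved, aa_index, flags */
static const uint32_t prop[] = { 1, 0x0, 0x10000000, 0x8000, 0, 0, 0x8 };

static void toy_read_drconf_cell(struct toy_drconf_cell *c, const uint32_t **cellp)
{
	const uint32_t *cp = *cellp;

	c->base_addr = ((uint64_t)cp[0] << 32) | cp[1];	/* read_n_cells() */
	c->drc_index = cp[2];
	c->reserved  = cp[3];
	c->aa_index  = cp[4];
	c->flags     = cp[5];
	*cellp = cp + N_MEM_ADDR_CELLS + 4;
}

int main(void)
{
	const uint32_t *dm = prop;
	uint32_t n = *dm++;

	while (n--) {
		struct toy_drconf_cell c;

		toy_read_drconf_cell(&c, &dm);
		printf("lmb at %#llx flags %#x\n",
		       (unsigned long long)c.base_addr, c.flags);
	}
	return 0;
}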
+/*
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
+ * from the device tree.
+ */
+static u64 of_get_lmb_size(struct device_node *memory)
+{
+ const u32 *prop;
+ u32 len;
+
+ prop = of_get_property(memory, "ibm,lmb-size", &len);
+ if (!prop || len < sizeof(unsigned int))
+ return 0;
+
+ return read_n_cells(n_mem_size_cells, &prop);
+}
+
+struct assoc_arrays {
+ u32 n_arrays;
+ u32 array_sz;
+ const u32 *arrays;
+};
+
+/*
+ * Retrieve and validate the list of associativity arrays for drconf
+ * memory from the ibm,associativity-lookup-arrays property of the
+ * device tree.
+ *
+ * The layout of the ibm,associativity-lookup-arrays property is a number N
+ * indicating the number of associativity arrays, followed by a number M
+ * indicating the size of each associativity array, followed by a list
+ * of N associativity arrays.
+ */
+static int of_get_assoc_arrays(struct device_node *memory,
+ struct assoc_arrays *aa)
+{
+ const u32 *prop;
+ u32 len;
+
+ prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
+ if (!prop || len < 2 * sizeof(unsigned int))
+ return -1;
+
+ aa->n_arrays = *prop++;
+ aa->array_sz = *prop++;
+
+	/* Now that we know the number of arrays and size of each array,
+ * revalidate the size of the property read in.
+ */
+ if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
+ return -1;
+
+ aa->arrays = prop;
+ return 0;
+}
+
+/*
+ * This is like of_node_to_nid_single() for memory represented in the
+ * ibm,dynamic-reconfiguration-memory node.
+ */
+static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
+ struct assoc_arrays *aa)
+{
+ int default_nid = 0;
+ int nid = default_nid;
+ int index;
+
+ if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
+ !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
+ drmem->aa_index < aa->n_arrays) {
+ index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
+ nid = aa->arrays[index];
+
+ if (nid == 0xffff || nid >= MAX_NUMNODES)
+ nid = default_nid;
+ }
+
+ return nid;
+}
+
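
aa->arrays is treated here as an n_arrays x array_sz matrix stored row-major: row aa_index is this lmb's associativity array, and the domain at depth min_common_depth sits at offset min_common_depth - 1 within it. A tiny worked example of the indexing, with hypothetical values:

#include <stdio.h>

int main(void)
{
	/* hypothetical values, purely to illustrate the row-major indexing */
	int array_sz = 5, min_common_depth = 2, aa_index = 3;
	int index = aa_index * array_sz + min_common_depth - 1;

	printf("flat index = %d\n", index);	/* 3*5 + 2 - 1 = 16 */
	return 0;
}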
/*
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
@@ -355,57 +493,50 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
*/
static void __init parse_drconf_memory(struct device_node *memory)
{
- const unsigned int *lm, *dm, *aa;
- unsigned int ls, ld, la;
- unsigned int n, aam, aalen;
- unsigned long lmb_size, size, start;
- int nid, default_nid = 0;
- unsigned int ai, flags;
-
- lm = of_get_property(memory, "ibm,lmb-size", &ls);
- dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
- aa = of_get_property(memory, "ibm,associativity-lookup-arrays", &la);
- if (!lm || !dm || !aa ||
- ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
- la < 2 * sizeof(unsigned int))
+ const u32 *dm;
+ unsigned int n, rc;
+ unsigned long lmb_size, size;
+ int nid;
+ struct assoc_arrays aa;
+
+ n = of_get_drconf_memory(memory, &dm);
+ if (!n)
+ return;
+
+ lmb_size = of_get_lmb_size(memory);
+ if (!lmb_size)
return;
- lmb_size = read_n_cells(n_mem_size_cells, &lm);
- n = *dm++; /* number of LMBs */
- aam = *aa++; /* number of associativity lists */
- aalen = *aa++; /* length of each associativity list */
- if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
- la < (aam * aalen + 2) * sizeof(unsigned int))
+ rc = of_get_assoc_arrays(memory, &aa);
+ if (rc)
return;
for (; n != 0; --n) {
- start = read_n_cells(n_mem_addr_cells, &dm);
- ai = dm[2];
- flags = dm[3];
- dm += 4;
- /* 0x80 == reserved, 0x8 = assigned to us */
- if ((flags & 0x80) || !(flags & 0x8))
+ struct of_drconf_cell drmem;
+
+ read_drconf_cell(&drmem, &dm);
+
+ /* skip this block if the reserved bit is set in flags (0x80)
+ or if the block is not assigned to this partition (0x8) */
+ if ((drmem.flags & DRCONF_MEM_RESERVED)
+ || !(drmem.flags & DRCONF_MEM_ASSIGNED))
continue;
- nid = default_nid;
- /* flags & 0x40 means associativity index is invalid */
- if (min_common_depth > 0 && min_common_depth <= aalen &&
- (flags & 0x40) == 0 && ai < aam) {
- /* this is like of_node_to_nid_single */
- nid = aa[ai * aalen + min_common_depth - 1];
- if (nid == 0xffff || nid >= MAX_NUMNODES)
- nid = default_nid;
- }
- fake_numa_create_new_node(((start + lmb_size) >> PAGE_SHIFT),
- &nid);
+ nid = of_drconf_to_nid_single(&drmem, &aa);
+
+ fake_numa_create_new_node(
+ ((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
+ &nid);
+
node_set_online(nid);
- size = numa_enforce_memory_limit(start, lmb_size);
+ size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
if (!size)
continue;
- add_active_range(nid, start >> PAGE_SHIFT,
- (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+ add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
+ (drmem.base_addr >> PAGE_SHIFT)
+ + (size >> PAGE_SHIFT));
}
}
@@ -770,6 +901,79 @@ early_param("numa", early_numa);
#ifdef CONFIG_MEMORY_HOTPLUG
/*
+ * Validate the node associated with the memory section we are
+ * trying to add.
+ */
+int valid_hot_add_scn(int *nid, unsigned long start, u32 lmb_size,
+ unsigned long scn_addr)
+{
+ nodemask_t nodes;
+
+ if (*nid < 0 || !node_online(*nid))
+ *nid = any_online_node(NODE_MASK_ALL);
+
+ if ((scn_addr >= start) && (scn_addr < (start + lmb_size))) {
+ nodes_setall(nodes);
+ while (NODE_DATA(*nid)->node_spanned_pages == 0) {
+ node_clear(*nid, nodes);
+ *nid = any_online_node(nodes);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Find the node associated with a hot added memory section represented
+ * by the ibm,dynamic-reconfiguration-memory node.
+ */
+static int hot_add_drconf_scn_to_nid(struct device_node *memory,
+ unsigned long scn_addr)
+{
+ const u32 *dm;
+ unsigned int n, rc;
+ unsigned long lmb_size;
+ int default_nid = any_online_node(NODE_MASK_ALL);
+ int nid;
+ struct assoc_arrays aa;
+
+ n = of_get_drconf_memory(memory, &dm);
+ if (!n)
+		return default_nid;
+
+ lmb_size = of_get_lmb_size(memory);
+ if (!lmb_size)
+ return default_nid;
+
+ rc = of_get_assoc_arrays(memory, &aa);
+ if (rc)
+ return default_nid;
+
+ for (; n != 0; --n) {
+ struct of_drconf_cell drmem;
+
+ read_drconf_cell(&drmem, &dm);
+
+ /* skip this block if it is reserved or not assigned to
+ * this partition */
+ if ((drmem.flags & DRCONF_MEM_RESERVED)
+ || !(drmem.flags & DRCONF_MEM_ASSIGNED))
+ continue;
+
+ nid = of_drconf_to_nid_single(&drmem, &aa);
+
+ if (valid_hot_add_scn(&nid, drmem.base_addr, lmb_size,
+ scn_addr))
+ return nid;
+ }
+
+ BUG(); /* section address should be found above */
+ return 0;
+}
+
+/*
* Find the node associated with a hot added memory section. Section
* corresponds to a SPARSEMEM section, not an LMB. It is assumed that
* sections are fully contained within a single LMB.
@@ -777,12 +981,17 @@ early_param("numa", early_numa);
int hot_add_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory = NULL;
- nodemask_t nodes;
- int default_nid = any_online_node(NODE_MASK_ALL);
int nid;
if (!numa_enabled || (min_common_depth < 0))
- return default_nid;
+ return any_online_node(NODE_MASK_ALL);
+
+ memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (memory) {
+ nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
+ of_node_put(memory);
+ return nid;
+ }
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start, size;
@@ -801,13 +1010,9 @@ ha_new_range:
size = read_n_cells(n_mem_size_cells, &memcell_buf);
nid = of_node_to_nid_single(memory);
- /* Domains not present at boot default to 0 */
- if (nid < 0 || !node_online(nid))
- nid = default_nid;
-
- if ((scn_addr >= start) && (scn_addr < (start + size))) {
+ if (valid_hot_add_scn(&nid, start, size, scn_addr)) {
of_node_put(memory);
- goto got_nid;
+ return nid;
}
if (--ranges) /* process all ranges in cell */
@@ -815,14 +1020,5 @@ ha_new_range:
}
BUG(); /* section address should be found above */
return 0;
-
- /* Temporary code to ensure that returned node is not empty */
-got_nid:
- nodes_setall(nodes);
- while (NODE_DATA(nid)->node_spanned_pages == 0) {
- node_clear(nid, nodes);
- nid = any_online_node(nodes);
- }
- return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 3c5727dd5aa..a1a368dd2d9 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -15,34 +15,13 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
-static int pseries_remove_memory(struct device_node *np)
+static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
{
- const char *type;
- const unsigned int *my_index;
- const unsigned int *regs;
- u64 start_pfn, start;
+ unsigned long start, start_pfn;
struct zone *zone;
- int ret = -EINVAL;
-
- /*
- * Check to see if we are actually removing memory
- */
- type = of_get_property(np, "device_type", NULL);
- if (type == NULL || strcmp(type, "memory") != 0)
- return 0;
+ int ret;
- /*
- * Find the memory index and size of the removing section
- */
- my_index = of_get_property(np, "ibm,my-drc-index", NULL);
- if (!my_index)
- return ret;
-
- regs = of_get_property(np, "reg", NULL);
- if (!regs)
- return ret;
-
- start_pfn = section_nr_to_pfn(*my_index & 0xffff);
+ start_pfn = base >> PFN_SECTION_SHIFT;
zone = page_zone(pfn_to_page(start_pfn));
/*
@@ -54,56 +33,111 @@ static int pseries_remove_memory(struct device_node *np)
* to sysfs "state" file and we can't remove sysfs entries
* while writing to it. So we have to defer it to here.
*/
- ret = __remove_pages(zone, start_pfn, regs[3] >> PAGE_SHIFT);
+ ret = __remove_pages(zone, start_pfn, lmb_size >> PAGE_SHIFT);
if (ret)
return ret;
/*
* Update memory regions for memory remove
*/
- lmb_remove(start_pfn << PAGE_SHIFT, regs[3]);
+ lmb_remove(base, lmb_size);
/*
* Remove htab bolted mappings for this section of memory
*/
- start = (unsigned long)__va(start_pfn << PAGE_SHIFT);
- ret = remove_section_mapping(start, start + regs[3]);
+ start = (unsigned long)__va(base);
+ ret = remove_section_mapping(start, start + lmb_size);
return ret;
}
-static int pseries_add_memory(struct device_node *np)
+static int pseries_remove_memory(struct device_node *np)
{
const char *type;
- const unsigned int *my_index;
const unsigned int *regs;
- u64 start_pfn;
+ unsigned long base;
+ unsigned int lmb_size;
int ret = -EINVAL;
/*
- * Check to see if we are actually adding memory
+ * Check to see if we are actually removing memory
*/
type = of_get_property(np, "device_type", NULL);
if (type == NULL || strcmp(type, "memory") != 0)
return 0;
/*
- * Find the memory index and size of the added section
+	 * Find the base address and size of the lmb
*/
- my_index = of_get_property(np, "ibm,my-drc-index", NULL);
- if (!my_index)
+ regs = of_get_property(np, "reg", NULL);
+ if (!regs)
return ret;
+ base = *(unsigned long *)regs;
+ lmb_size = regs[3];
+
+ ret = pseries_remove_lmb(base, lmb_size);
+ return ret;
+}
+
+static int pseries_add_memory(struct device_node *np)
+{
+ const char *type;
+ const unsigned int *regs;
+ unsigned long base;
+ unsigned int lmb_size;
+ int ret = -EINVAL;
+
+ /*
+ * Check to see if we are actually adding memory
+ */
+ type = of_get_property(np, "device_type", NULL);
+ if (type == NULL || strcmp(type, "memory") != 0)
+ return 0;
+
+ /*
+ * Find the base and size of the lmb
+ */
regs = of_get_property(np, "reg", NULL);
if (!regs)
return ret;
- start_pfn = section_nr_to_pfn(*my_index & 0xffff);
+ base = *(unsigned long *)regs;
+ lmb_size = regs[3];
/*
* Update memory region to represent the memory add
*/
- lmb_add(start_pfn << PAGE_SHIFT, regs[3]);
- return 0;
+ ret = lmb_add(base, lmb_size);
+ return (ret < 0) ? -EINVAL : 0;
+}
+
+static int pseries_drconf_memory(unsigned long *base, unsigned int action)
+{
+ struct device_node *np;
+ const unsigned long *lmb_size;
+ int rc;
+
+ np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!np)
+ return -EINVAL;
+
+ lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
+ if (!lmb_size) {
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ if (action == PSERIES_DRCONF_MEM_ADD) {
+ rc = lmb_add(*base, *lmb_size);
+ rc = (rc < 0) ? -EINVAL : 0;
+ } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
+ rc = pseries_remove_lmb(*base, *lmb_size);
+ } else {
+ rc = -EINVAL;
+ }
+
+ of_node_put(np);
+ return rc;
}
static int pseries_memory_notifier(struct notifier_block *nb,
@@ -120,6 +154,11 @@ static int pseries_memory_notifier(struct notifier_block *nb,
if (pseries_remove_memory(node))
err = NOTIFY_BAD;
break;
+ case PSERIES_DRCONF_MEM_ADD:
+ case PSERIES_DRCONF_MEM_REMOVE:
+ if (pseries_drconf_memory(node, action))
+ err = NOTIFY_BAD;
+ break;
default:
err = NOTIFY_DONE;
break;
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 75769aae41d..7637bd38c79 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -365,7 +365,7 @@ static char *parse_node(char *buf, size_t bufsize, struct device_node **npp)
*buf = '\0';
buf++;
- handle = simple_strtoul(handle_str, NULL, 10);
+ handle = simple_strtoul(handle_str, NULL, 0);
*npp = of_find_node_by_phandle(handle);
return buf;
@@ -422,8 +422,8 @@ static int do_update_property(char *buf, size_t bufsize)
{
struct device_node *np;
unsigned char *value;
- char *name, *end;
- int length;
+ char *name, *end, *next_prop;
+ int rc, length;
struct property *newprop, *oldprop;
buf = parse_node(buf, bufsize, &np);
end = buf + bufsize;
@@ -431,7 +431,8 @@ static int do_update_property(char *buf, size_t bufsize)
if (!np)
return -ENODEV;
- if (parse_next_property(buf, end, &name, &length, &value) == NULL)
+ next_prop = parse_next_property(buf, end, &name, &length, &value);
+ if (!next_prop)
return -EINVAL;
newprop = new_property(name, length, value, NULL);
@@ -442,7 +443,34 @@ static int do_update_property(char *buf, size_t bufsize)
if (!oldprop)
return -ENODEV;
- return prom_update_property(np, newprop, oldprop);
+ rc = prom_update_property(np, newprop, oldprop);
+ if (rc)
+ return rc;
+
+ /* For memory under the ibm,dynamic-reconfiguration-memory node
+ * of the device tree, adding and removing memory is just an update
+ * to the ibm,dynamic-memory property instead of adding/removing a
+ * memory node in the device tree. For these cases we still need to
+ * involve the notifier chain.
+ */
+ if (!strcmp(name, "ibm,dynamic-memory")) {
+ int action;
+
+ next_prop = parse_next_property(next_prop, end, &name,
+ &length, &value);
+ if (!next_prop)
+ return -EINVAL;
+
+ if (!strcmp(name, "add"))
+ action = PSERIES_DRCONF_MEM_ADD;
+ else
+ action = PSERIES_DRCONF_MEM_REMOVE;
+
+ blocking_notifier_call_chain(&pSeries_reconfig_chain,
+ action, value);
+ }
+
+ return 0;
}
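
Consumers observe these property updates through the same pSeries reconfig chain as whole-node add/remove; pseries_memory_notifier() in hotplug-memory.c above is one such consumer. A hedged sketch of a notifier handling the new actions (hypothetical module code; only pSeries_reconfig_notifier_register() from pSeries_reconfig.h is assumed):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/pSeries_reconfig.h>

/* Sketch: for the DRCONF actions the void *data argument points at the
 * lmb base address, as pseries_drconf_memory() above expects. */
static int sample_reconfig_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	switch (action) {
	case PSERIES_DRCONF_MEM_ADD:
	case PSERIES_DRCONF_MEM_REMOVE:
		pr_debug("drconf memory update at %#lx\n",
			 *(unsigned long *)data);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;	/* not a drconf memory update */
	}
}

static struct notifier_block sample_nb = {
	.notifier_call = sample_reconfig_notifier,
};

static int __init sample_init(void)
{
	return pSeries_reconfig_notifier_register(&sample_nb);
}
__initcall(sample_init);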
/**
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index ef3a5d156db..107d9b915e3 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -12,7 +12,8 @@
#include <asm/types.h>
-#define PPC_NOP_INSTR 0x60000000
+#define PPC_NOP_INSTR 0x60000000
+#define PPC_LWSYNC_INSTR 0x7c2004ac
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 4e4491cb9d3..3171ac904b9 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -156,6 +156,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000)
#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
+#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -369,43 +370,43 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
CPU_FTR_NODSISRALIGN)
#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
- CPU_FTR_L2CSR)
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
-#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
-#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
CPU_FTR_MMCRA | CPU_FTR_CTRL)
-#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA)
-#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
-#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR)
-#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR)
-#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR)
-#define CPU_FTRS_CELL (CPU_FTR_USE_TB | \
+#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
-#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
+#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 38a51728406..89664675b46 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -204,28 +204,8 @@ static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
}
#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
-static inline int dump_task_regs(struct task_struct *tsk,
- elf_gregset_t *elf_regs)
-{
- struct pt_regs *regs = tsk->thread.regs;
- if (regs)
- ppc_elf_core_copy_regs(*elf_regs, regs);
-
- return 1;
-}
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
-
-extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
typedef elf_vrregset_t elf_fpxregset_t;
-#ifdef CONFIG_ALTIVEC
-extern int dump_task_altivec(struct task_struct *, elf_vrregset_t *vrregs);
-#define ELF_CORE_COPY_XFPREGS(tsk, regs) dump_task_altivec(tsk, regs)
-#define ELF_CORE_XFPREG_TYPE NT_PPC_VMX
-#endif
-
/* ELF_HWCAP yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h
index ab30129dced..a1029967620 100644
--- a/include/asm-powerpc/feature-fixups.h
+++ b/include/asm-powerpc/feature-fixups.h
@@ -113,4 +113,14 @@ label##5: \
#endif /* __ASSEMBLY__ */
+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label) label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect) \
+label##2: \
+ .pushsection sect,"a"; \
+ .align 2; \
+label##3: \
+ .long label##1b-label##3b; \
+ .popsection;
+
#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/include/asm-powerpc/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h
index ea6cfb8efb8..e482e5352e6 100644
--- a/include/asm-powerpc/pSeries_reconfig.h
+++ b/include/asm-powerpc/pSeries_reconfig.h
@@ -9,8 +9,10 @@
* added or removed on pSeries systems.
*/
-#define PSERIES_RECONFIG_ADD 0x0001
-#define PSERIES_RECONFIG_REMOVE 0x0002
+#define PSERIES_RECONFIG_ADD 0x0001
+#define PSERIES_RECONFIG_REMOVE 0x0002
+#define PSERIES_DRCONF_MEM_ADD 0x0003
+#define PSERIES_DRCONF_MEM_REMOVE 0x0004
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index e93e72df4bc..061cd17ba83 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -222,7 +222,7 @@ struct thread_struct {
.ksp_limit = INIT_SP_LIMIT, \
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.fs = KERNEL_DS, \
- .fpr = {0}, \
+ .fpr = {{0}}, \
.fpscr = { .val = 0, }, \
.fpexc_mode = 0, \
}
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
index 9aea8e9f0bd..54a47ea2c3a 100644
--- a/include/asm-powerpc/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -13,6 +13,8 @@
#define MAX_PHYSADDR_BITS 44
#define MAX_PHYSMEM_BITS 44
+#endif /* CONFIG_SPARSEMEM */
+
#ifdef CONFIG_MEMORY_HOTPLUG
extern void create_section_mapping(unsigned long start, unsigned long end);
extern int remove_section_mapping(unsigned long start, unsigned long end);
@@ -26,7 +28,5 @@ static inline int hot_add_scn_to_nid(unsigned long scn_addr)
#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */
-#endif /* CONFIG_SPARSEMEM */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 42a1ef59069..45963e80f55 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -3,34 +3,42 @@
#ifdef __KERNEL__
#include <linux/stringify.h>
+#include <asm/feature-fixups.h>
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+ void *fixup_end);
+
+static inline void eieio(void)
+{
+ __asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+ __asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */
-#ifdef __SUBARCH_HAS_LWSYNC
+#if defined(__powerpc64__)
# define LWSYNC lwsync
+#elif defined(CONFIG_E500)
+# define LWSYNC \
+ START_LWSYNC_SECTION(96); \
+ sync; \
+ MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
#else
# define LWSYNC sync
#endif
#ifdef CONFIG_SMP
#define ISYNC_ON_SMP "\n\tisync\n"
-#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
#else
#define ISYNC_ON_SMP
#define LWSYNC_ON_SMP
#endif
-static inline void eieio(void)
-{
- __asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
- __asm__ __volatile__ ("isync" : : : "memory");
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */
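
Note on usage: LWSYNC_ON_SMP is consumed as a string inside inline assembly (as in the atomic and bitops headers), so on e500 the full sync plus its fixup entry are emitted at every use site and rewritten in place by do_lwsync_fixups() at boot. A hedged sketch of the pattern with a hypothetical helper:

#include <asm/synch.h>

/* Hypothetical store-release helper showing how LWSYNC_ON_SMP splices
 * into inline asm; it expands to lwsync/sync (or nothing on UP). */
static inline void sample_store_release(int *p, int v)
{
	__asm__ __volatile__(
		LWSYNC_ON_SMP
	"	stw	%1,%0\n"
		: "=m" (*p)
		: "r" (v)
		: "memory");
}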