author     Michael Ellerman <michael@ellerman.id.au>    2008-06-24 11:32:36 +1000
committer  Paul Mackerras <paulus@samba.org>            2008-07-01 11:28:24 +1000
commit     51c52e86694f19e84600a40f6156889feafd8ae9 (patch)
tree       3a1d532ba5c210c4ad4c0d2c9eee24c23b53e677 /arch/powerpc
parent     b7bcda631e87eb3466d0baa9885650ba7d7ed89d (diff)
powerpc: Split out do_feature_fixups() from cputable.c
The logic to patch CPU feature sections lives in cputable.c, but these days it's used for CPU features as well as firmware features. Move it into its own file for neatness and as preparation for some additions.

While we're moving the code, we pull the loop body logic into a separate routine, and remove a comment which no longer applies.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
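
For context (not part of this commit): a minimal sketch of how boot code typically drives the moved routine for both CPU and firmware feature sections. The linker symbols, headers, and helper name below are assumed from the powerpc setup code of this period, not introduced by this diff.

    #include <asm/cputable.h>   /* cur_cpu_spec, do_feature_fixups() (assumed location) */
    #include <asm/firmware.h>   /* powerpc_firmware_features (assumed location) */

    /* Linker-script bounds of the fixup tables; names assumed. */
    extern char __start___ftr_fixup[], __stop___ftr_fixup[];
    extern char __start___fw_ftr_fixup[], __stop___fw_ftr_fixup[];

    static void apply_feature_fixups_sketch(void)   /* illustrative helper name */
    {
            /* Nop out CPU feature sections that this CPU doesn't have. */
            do_feature_fixups(cur_cpu_spec->cpu_features,
                              __start___ftr_fixup, __stop___ftr_fixup);

            /* Likewise for firmware feature sections (64-bit in this era). */
            do_feature_fixups(powerpc_firmware_features,
                              __start___fw_ftr_fixup, __stop___fw_ftr_fixup);
    }
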
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/kernel/cputable.c      36
-rw-r--r--   arch/powerpc/lib/Makefile            1
-rw-r--r--   arch/powerpc/lib/feature-fixups.c   56
3 files changed, 57 insertions(+), 36 deletions(-)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index ba5b23f5476..817cea1b5ad 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <asm/oprofile_impl.h>
-#include <asm/code-patching.h>
#include <asm/cputable.h>
#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
@@ -1638,38 +1637,3 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
BUG();
return NULL;
}
-
-void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
-{
- struct fixup_entry {
- unsigned long mask;
- unsigned long value;
- long start_off;
- long end_off;
- } *fcur, *fend;
-
- fcur = fixup_start;
- fend = fixup_end;
-
- for (; fcur < fend; fcur++) {
- unsigned int *pstart, *pend, *p;
-
- if ((value & fcur->mask) == fcur->value)
- continue;
-
- /* These PTRRELOCs will disappear once the new scheme for
- * modules and vdso is implemented
- */
- pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
- pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
-
- for (p = pstart; p < pend; p++) {
- *p = PPC_NOP_INSTR;
- asm volatile ("dcbst 0, %0" : : "r" (p));
- }
- asm volatile ("sync" : : : "memory");
- for (p = pstart; p < pend; p++)
- asm volatile ("icbi 0,%0" : : "r" (p));
- asm volatile ("sync; isync" : : : "memory");
- }
-}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index e7f7042b9f6..fc52771f0cd 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -26,3 +26,4 @@ endif
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-y += code-patching.o
+obj-y += feature-fixups.o
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
new file mode 100644
index 00000000000..f6fd5d2ff10
--- /dev/null
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ * Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <asm/cputable.h>
+#include <asm/code-patching.h>
+
+
+struct fixup_entry {
+ unsigned long mask;
+ unsigned long value;
+ long start_off;
+ long end_off;
+};
+
+static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+ unsigned int *pstart, *pend, *p;
+
+ if ((value & fcur->mask) == fcur->value)
+ return;
+
+ pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+ pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+ for (p = pstart; p < pend; p++) {
+ *p = PPC_NOP_INSTR;
+ asm volatile ("dcbst 0, %0" : : "r" (p));
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = pstart; p < pend; p++)
+ asm volatile ("icbi 0,%0" : : "r" (p));
+ asm volatile ("sync; isync" : : : "memory");
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+ struct fixup_entry *fcur, *fend;
+
+ fcur = fixup_start;
+ fend = fixup_end;
+
+ for (; fcur < fend; fcur++)
+ patch_feature_section(value, fcur);
+}
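
A note on the data layout (illustrative only, not part of this commit): start_off and end_off in struct fixup_entry are byte offsets relative to the entry itself, which is why patch_feature_section() recovers the patch range with pointer arithmetic on fcur. In the kernel these entries are emitted by the feature-section assembly macros and collected by the linker script; the hand-built example below just shows the offset arithmetic and assumes it is compiled in the same file as patch_feature_section() (which is static). The struct and values are hypothetical.

    struct example_area {                   /* hypothetical, for illustration only */
            struct fixup_entry entry;
            unsigned int insns[2];          /* the instructions this "section" guards */
    };

    static void example_fixup(struct example_area *a, unsigned long cpu_features)
    {
            a->entry.mask  = 0x1;           /* feature bit the code depends on */
            a->entry.value = 0x1;           /* keep the code only if that bit is set */
            a->entry.start_off = (char *)a->insns - (char *)&a->entry;
            a->entry.end_off   = (char *)&a->insns[2] - (char *)&a->entry;

            /* If bit 0 is clear in cpu_features, both instructions are
             * overwritten with PPC_NOP_INSTR and the caches are flushed. */
            patch_feature_section(cpu_features, &a->entry);
    }
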