author    Paul Mundt <lethal@linux-sh.org>  2007-03-01 15:56:31 +0900
committer Paul Mundt <lethal@linux-sh.org>  2007-03-05 14:13:26 +0900
commit    87e29cacb7d09c81b09224bec395f970df958af4
tree      536a67c79bc54a6de1e911b31b6918efa2700c6d
parent    5c36e6578d81f79ede871d3e66a0d6beeffeb3dc
sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.
Previously this was using a hardcoded 32; use L1_CACHE_BYTES for cacheline alignment instead.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
 arch/sh/kernel/vmlinux.lds.S | 3 ++-
 include/asm-sh/cache.h       | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
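Before the patch body, a minimal sketch of the arithmetic involved, assuming the usual <asm-sh/cache.h> pattern in which L1_CACHE_BYTES is derived from a per-CPU L1_CACHE_SHIFT; the shift value of 5 (32-byte lines) is illustrative, and L1_CACHE_ALIGN() is the macro visible in the cache.h hunk below:

#include <stdio.h>

#define L1_CACHE_SHIFT	5	/* illustrative: 32-byte cachelines */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

int main(void)
{
	unsigned long addr = 0x1004;

	/* Round up to the next cacheline boundary: 0x1004 -> 0x1020 */
	printf("aligned: %#lx\n", L1_CACHE_ALIGN(addr));
	return 0;
}

ALIGN(L1_CACHE_BYTES) in the linker script performs the same round-up on the location counter, so the boundary tracks the configured CPU's cacheline size instead of a hardcoded 32.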
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 75de165867a..78a6c09875b 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,6 +3,7 @@
  * Written by Niibe Yutaka
  */
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -53,7 +54,7 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
   .data.page_aligned : { *(.data.page_aligned) }
 
-  . = ALIGN(32);
+  . = ALIGN(L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : { *(.data.percpu) }
   __per_cpu_end = .;
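The same property expressed at the C level: a sketch, assuming the kernel's usual ____cacheline_aligned pattern from <linux/cache.h>, which builds an alignment attribute from the cacheline size; the struct and the shift value here are illustrative stand-ins so the example compiles on its own:

#include <stdio.h>

#define L1_CACHE_SHIFT	5	/* illustrative; the real value is per-CPU */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define ____cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))

/* Each instance starts on its own cacheline boundary */
struct per_cpu_counters {
	unsigned long count;
} ____cacheline_aligned;

int main(void)
{
	printf("alignment: %zu bytes\n", _Alignof(struct per_cpu_counters));
	return 0;
}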
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index e3a180cf506..9a3cb6ba9d1 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -21,6 +21,7 @@
 #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
+#ifndef __ASSEMBLY__
 struct cache_info {
 	unsigned int ways;	/* Number of cache ways */
 	unsigned int sets;	/* Number of cache sets */
@@ -47,6 +48,6 @@ struct cache_info {
 	unsigned long flags;
 };
-
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
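Why the guard matters: vmlinux.lds.S is run through the C preprocessor with __ASSEMBLY__ defined, so it can consume plain #defines such as L1_CACHE_BYTES, but a C declaration like struct cache_info would be passed through verbatim and break the linker script. A sketch of the guard pattern this hunk applies, with a hypothetical header name:

/* widget.h: usable from C, assembly, and linker scripts alike */
#ifndef __WIDGET_H
#define __WIDGET_H

#define WIDGET_ALIGN	32	/* plain cpp: safe everywhere */

#ifndef __ASSEMBLY__
/* C-only: a struct would be a syntax error outside a C translation unit */
struct widget {
	unsigned int size;
};
#endif /* __ASSEMBLY__ */

#endif /* __WIDGET_H */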