author    Paul Mundt <lethal@linux-sh.org>  2007-11-10 19:57:58 +0900
committer Paul Mundt <lethal@linux-sh.org>  2008-01-28 13:18:43 +0900
commit    01fed9311ab8a724283b3f456c12e573cb51d92b (patch)
tree      a270222feed305e586c779063df50b1447ef877f /include/asm-sh/page.h
parent    9b01bd9ee6408846c0553c03fb4b864353a845c9 (diff)
sh: Consolidate slab/kmalloc minalign values.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh/page.h')
-rw-r--r--  include/asm-sh/page.h  14
1 file changed, 14 insertions, 0 deletions
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index d00a8fde7c7..d0273dbce6b 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -157,8 +157,22 @@ typedef struct { unsigned long pgd; } pgd_t;
  * Slub defaults to 8-byte alignment, we're only interested in 4.
  * Slab defaults to BYTES_PER_WORD, which ends up being the same anyways.
  */
+#ifdef CONFIG_SUPERH32
 #define ARCH_KMALLOC_MINALIGN 4
 #define ARCH_SLAB_MINALIGN 4
+#else
+/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
+   kmalloc allocations to be 8-byte aligned. Without this, the alignment
+   becomes BYTES_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
+   sh64 at the moment). */
+#define ARCH_KMALLOC_MINALIGN 8
+
+/*
+ * We want 8-byte alignment for the slab caches as well, otherwise we have
+ * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
+ */
+#define ARCH_SLAB_MINALIGN 8
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_PAGE_H */
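
The alignment property the patch guarantees can be illustrated outside the kernel. The following is a minimal user-space sketch, not part of the patch: CONFIG_SUPERH32 is reused here only to mirror the header's #ifdef, and malloc() stands in for kmalloc(). It checks the condition the new ARCH_KMALLOC_MINALIGN of 8 is meant to ensure on sh64, where a gcc-inlined memset may emit st.q (64-bit store) instructions that require 8-byte-aligned destinations.

/* Illustrative sketch only -- not kernel code. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#ifdef CONFIG_SUPERH32
#define ARCH_KMALLOC_MINALIGN 4	/* 32-bit SH: word alignment suffices */
#else
#define ARCH_KMALLOC_MINALIGN 8	/* sh64: st.q stores need 8-byte alignment */
#endif

int main(void)
{
	void *p = malloc(64);	/* stand-in for kmalloc() in this sketch */

	/* An address meets the minimum alignment iff its low bits are clear. */
	if (((uintptr_t)p & (ARCH_KMALLOC_MINALIGN - 1)) == 0)
		printf("%p satisfies the %d-byte minalign\n",
		       p, ARCH_KMALLOC_MINALIGN);
	else
		printf("%p violates the %d-byte minalign\n",
		       p, ARCH_KMALLOC_MINALIGN);

	free(p);
	return 0;
}

On most hosts malloc() already returns memory aligned to 8 bytes or more, so the check passes; the point of the patch is that kmalloc() on sh64 previously guaranteed only sizeof(void *) == 4 bytes, which an inlined st.q-based memset cannot tolerate.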