author    Vegard Nossum <vegard.nossum@gmail.com>  2008-06-10 23:45:45 +0200
committer Ingo Molnar <mingo@elte.hu>              2008-06-18 12:27:03 +0200
commit    0db125c467afcbcc229abb1a87bc36ef72777dc2 (patch)
tree      afb52caf91eb9db5115355c7163fdf11bb4bec0a /include/asm-x86
parent    e6e07d8a2d2989c1f42287131308aa2fde253631 (diff)
x86: more header fixes
Summary: Add missing include guards for some x86 headers.

This has only had the most rudimentary testing, but is hopefully obviously
correct.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
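For background, an include guard wraps a header's contents so that a second
#include of the same file within one translation unit expands to nothing,
avoiding duplicate-definition errors. A minimal sketch of the pattern this
patch applies (EXAMPLE_GUARD_H and example_count are hypothetical names, not
taken from the kernel headers):

    #ifndef EXAMPLE_GUARD_H
    #define EXAMPLE_GUARD_H

    /* Declarations between the guard macros are compiled only once,
     * no matter how many times the header is included. */
    extern int example_count;

    #endif /* EXAMPLE_GUARD_H */

The guard macro must be unique per header; the patch below uses per-header
names such as ASM_X86__XOR_32_H so that two headers never share a guard.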
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/seccomp_64.h  1
-rw-r--r--  include/asm-x86/suspend_32.h  5
-rw-r--r--  include/asm-x86/xor_32.h      5
-rw-r--r--  include/asm-x86/xor_64.h      5
4 files changed, 16 insertions, 0 deletions
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a228..76cfe69aa63 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
#include <linux/thread_info.h>
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8..8675c6782a7 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
* Based on code
* Copyright 2001 Patrick Mochel <mochel@osdl.org>
*/
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
#include <asm/desc.h>
#include <asm/i387.h>
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
#endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a..921b4584044 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
/*
* Optimized RAID-5 checksumming functions for MMX and SSE.
*
@@ -881,3 +884,5 @@ do { \
deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) \
(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8..2d3a18de295 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
/*
* Optimized RAID-5 checksumming functions for MMX and SSE.
*
@@ -354,3 +357,5 @@ do { \
We may also be able to load into the L1 only depending on how the cpu
deals with a load to a line that is being prefetched. */
#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */