Diffstat (limited to 'include')
-rw-r--r--   include/asm-i386/bitops.h      2
-rw-r--r--   include/asm-i386/current.h     2
-rw-r--r--   include/asm-i386/string.h      8
-rw-r--r--   include/asm-i386/uaccess.h     8
-rw-r--r--   include/asm-x86_64/fixmap.h    2
-rw-r--r--   include/asm-x86_64/uaccess.h   6
-rw-r--r--   include/linux/mm.h             2
7 files changed, 15 insertions(+), 15 deletions(-)
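
The change is mechanical: every converted helper keys its fast path on __builtin_constant_p() (or, in the fixmap case, on a range check that must be folded away), so it only behaves as intended when its body is genuinely inlined into the caller. Plain "inline" is just a hint; an out-of-line instance sees a non-constant size argument and always takes the generic path. A minimal standalone sketch of that effect, using a hypothetical copy_small() helper rather than any function from this patch:

#include <string.h>

/* The kernel's definition, spelled out so this sketch builds standalone. */
#define __always_inline inline __attribute__((always_inline))

/*
 * Hypothetical helper (not part of the patch), shaped like the string.h
 * and uaccess.h routines below: the switch collapses to a single move
 * only when the body is inlined into a caller where n is a compile-time
 * constant.  An out-of-line instance sees __builtin_constant_p(n) == 0
 * and always falls back to the generic path.
 */
static __always_inline void *copy_small(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1: *(char *)to = *(const char *)from; return to;
		case 4: *(int *)to  = *(const int *)from;  return to;
		}
	}
	return memcpy(to, from, n);	/* generic fallback */
}

With __always_inline the compiler has to inline the helper, so a caller passing sizeof(int) reliably gets the single-store path instead of a call.
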
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index fe0819fe9c6..88e6ca248cd 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -247,7 +247,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
static int test_bit(int nr, const volatile void * addr);
#endif
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index d97328951f5..3cbbecd7901 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -5,7 +5,7 @@
struct task_struct;
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
{
return current_thread_info()->task;
}
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 02c8f5d2206..bb5f88a27f7 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -201,7 +201,7 @@ __asm__ __volatile__(
return __res;
}
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
@@ -223,7 +223,7 @@ return (to);
* This looks ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
long esi, edi;
if (!n) return to;
@@ -367,7 +367,7 @@ return s;
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
{
int d0, d1;
__asm__ __volatile__(
@@ -416,7 +416,7 @@ extern char *strstr(const char *cs, const char *ct);
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
{
switch (count) {
case 0:
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 89ab7e2bc5a..3f1337c3420 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -411,7 +411,7 @@ unsigned long __must_check __copy_from_user_ll(void *to,
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
@@ -432,7 +432,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
return __copy_to_user_ll(to, from, n);
}
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
@@ -456,7 +456,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
@@ -477,7 +477,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return __copy_from_user_ll(to, from, n);
}
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
diff --git a/include/asm-x86_64/fixmap.h b/include/asm-x86_64/fixmap.h
index a582cfcf223..7b286bd21d1 100644
--- a/include/asm-x86_64/fixmap.h
+++ b/include/asm-x86_64/fixmap.h
@@ -76,7 +76,7 @@ extern void __this_fixmap_does_not_exist(void);
* directly without translation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
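
The fix_to_virt() hunk depends on the same guarantee for correctness rather than speed: the range check must be folded away at compile time so that the reference to the deliberately undefined __this_fixmap_does_not_exist() disappears for valid indices and turns into a link error for invalid ones. A rough standalone sketch of that pattern, using illustrative constants rather than the kernel's real fixmap layout:

#define __always_inline inline __attribute__((always_inline))

/* Deliberately never defined: any surviving call becomes a link error. */
extern void __this_fixmap_does_not_exist(void);

#define PAGE_SHIFT                12
#define FIXADDR_TOP               0xfffff000UL   /* illustrative value */
#define __end_of_fixed_addresses  16             /* illustrative value */
#define __fix_to_virt(x)          (FIXADDR_TOP - ((x) << PAGE_SHIFT))

static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
	/*
	 * With a constant idx this branch is folded away after inlining;
	 * an out-of-range constant keeps the call and the build fails at
	 * link time.  An out-of-line copy of the function would keep the
	 * call unconditionally and never link.
	 */
	if (idx >= __end_of_fixed_addresses)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
}
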
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 2892c4b7a28..bddffcb591b 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -244,7 +244,7 @@ extern unsigned long copy_to_user(void __user *to, const void *from, unsigned len);
extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
@@ -273,7 +273,7 @@ static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
}
}
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
@@ -305,7 +305,7 @@ static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
}
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c643016499a..85854b86746 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -512,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
extern struct page *mem_map;
#endif
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
{
return __va(page_to_pfn(page) << PAGE_SHIFT);
}