author    Alexander van Heukelum <heukelum@mailshack.com>  2008-03-11 16:17:19 +0100
committer Ingo Molnar <mingo@elte.hu>  2008-04-26 19:21:16 +0200
commit    64970b68d2b3ed32b964b0b30b1b98518fde388e (patch)
tree      7d8eb5ea3ab1a841afa0f7ae1c65e7be4a9ca690 /include/asm-x86
parent    60b6783a044a55273b637983f52965c2808a6b86 (diff)
x86, generic: optimize find_next_(zero_)bit for small constant-size bitmaps
This moves an optimization for searching constant-sized small bitmaps
from x86_64-specific to generic code.

On an i386 defconfig (the x86#testing one), the size of vmlinux hardly
changes with this applied. I have observed only four places where this
optimization avoids a call into find_next_bit: in the functions
return_unused_surplus_pages, alloc_fresh_huge_page, and
adjust_pool_surplus, this patch avoids a call for a 1-bit bitmap; in
__next_cpu a call is avoided for a 32-bit bitmap. That's it.

On x86_64, 52 locations are optimized with a minimal increase in code
size:

Current #testing defconfig:

	146 x bsf, 27 x find_next_*bit
	   text    data     bss     dec     hex filename
	5392637  846592  724424 6963653  6a41c5 vmlinux

After removing the x86_64-specific optimization for find_next_*bit:

	94 x bsf, 79 x find_next_*bit
	   text    data     bss     dec     hex filename
	5392358  846592  724424 6963374  6a40ae vmlinux

After this patch (making the optimization generic):

	146 x bsf, 27 x find_next_*bit
	   text    data     bss     dec     hex filename
	5392396  846592  724424 6963412  6a40d4 vmlinux

[ tglx@linutronix.de: build fixes ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
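The shape of the generic optimization is easy to sketch in plain C.
The following is a minimal illustration under stated assumptions, not
the kernel's exact code: __find_next_bit stands in for the out-of-line
library routine, and its loop body is deliberately naive. When size is
a compile-time constant no wider than one machine word, the
__builtin_constant_p branch folds away and a single bit-scan
(__builtin_ctzl, which GCC lowers to bsf on x86) remains:

#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Stand-in for the kernel's out-of-line lib/ implementation:
 * a plain loop over the bitmap words. */
static unsigned long __find_next_bit(const unsigned long *addr,
				     unsigned long size, unsigned long offset)
{
	unsigned long i;

	for (i = offset; i < size; i++)
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}

/* If the compiler can prove the bitmap is a compile-time constant no
 * wider than one word, the search reduces to masking plus a single
 * bit-scan; no set bit yields the conventional "not found" result,
 * 'size'. Otherwise, fall back to the out-of-line routine. */
static inline unsigned long find_next_bit(const unsigned long *addr,
					  unsigned long size,
					  unsigned long offset)
{
	if (__builtin_constant_p(size) && size <= BITS_PER_LONG) {
		unsigned long val;

		if (offset >= size)
			return size;
		val = *addr & (~0UL << offset);	  /* drop bits below offset */
		if (size < BITS_PER_LONG)	  /* drop bits past size    */
			val &= (1UL << size) - 1;
		return val ? (unsigned long)__builtin_ctzl(val) : size;
	}
	return __find_next_bit(addr, size, offset);
}

Called as find_next_bit(&word, 32, n), the constant branch is selected
at compile time and no call is emitted, which is the same effect behind
the 32-bit __next_cpu case counted above.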
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/bitops.h    |  6 ------
-rw-r--r--  include/asm-x86/bitops_64.h | 10 ----------
2 files changed, 0 insertions, 16 deletions
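The hunks below delete the x86_64-only macro wrappers around __scanbit;
only that helper's tail is visible in the second hunk. Its idea,
reconstructed here as a hedged sketch rather than the verbatim
historical code, is that bsf sets ZF when its source is zero, so a
following cmovz can substitute 'max' as the "no bit found" result:

static inline long __scanbit(unsigned long val, unsigned long max)
{
	/* bsf: find the lowest set bit; it leaves the destination
	 * undefined and sets ZF when val is zero, in which case
	 * cmovz replaces the result with max. */
	asm("bsf %1,%0; cmovz %2,%0"
	    : "=&r" (val)
	    : "r" (val), "r" (max));
	return val;
}

The deleted find_next_bit macro then merely shifted the word right by
off, scanned with max = (size)-(off), and added off back: the same
mask-and-scan idea the generic version now expresses portably.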
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 31e408de90c..1ae7b270a1e 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -306,12 +306,6 @@ static int test_bit(int nr, const volatile unsigned long *addr);
#undef BIT_ADDR
#undef ADDR
-unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset);
-unsigned long find_next_zero_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset);
-
-
#ifdef CONFIG_X86_32
# include "bitops_32.h"
#else
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index 65b20fb2ae7..7118ef2cc4e 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -15,16 +15,6 @@ static inline long __scanbit(unsigned long val, unsigned long max)
return val;
}
-#define find_next_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
- find_next_bit(addr,size,off)))
-
-#define find_next_zero_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
- find_next_zero_bit(addr,size,off)))
-
#define find_first_bit(addr, size) \
((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
? (__scanbit(*(unsigned long *)(addr), (size))) \