author    Andi Kleen <andi@firstfloor.org>  2008-09-04 13:46:11 +0200
committer H. Peter Anvin <hpa@zytor.com>    2008-09-04 08:42:06 -0700
commit    fb481dd56adf3c5b0993b8f052cc9ba966e3959d (patch)
tree      22312cb77dbc8bf71bc54dff40efad7cf8e2d822 /arch/x86/lib
parent    a5444d15b611cf2ffe2bc52aaf11f2ac51882f89 (diff)
x86: drop -funroll-loops for csum_partial_64.c
Impact: performance optimization

I did some rebenchmarking with modern compilers and dropping
-funroll-loops makes the function consistently go faster by a few
percent. So drop that flag.

Thanks to Richard Guenther for a hint.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/Makefile | 3 ---
1 file changed, 0 insertions(+), 3 deletions(-)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index aa3fa411942..55e11aa6d66 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -17,9 +17,6 @@ ifeq ($(CONFIG_X86_32),y)
         lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
 else
         obj-y += io_64.o iomap_copy_64.o
-
-        CFLAGS_csum-partial_64.o := -funroll-loops
-
         lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
         lib-y += thunk_64.o clear_page_64.o copy_page_64.o
         lib-y += memmove_64.o memset_64.o
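
For context, the removed line uses kbuild's per-object flag mechanism: assigning
CFLAGS_<object>.o in a Makefile adds those flags only when that single object is
compiled, while every other object keeps the default KBUILD_CFLAGS. The fragment
below is a minimal illustrative sketch, not part of this patch or the kernel tree;
"foo.o" and "-DDEBUG_FOO" are made-up names.

    # Hypothetical kbuild Makefile fragment showing the mechanism
    obj-y += foo.o

    # Extra flags applied only when building foo.o; all other objects
    # in this Makefile keep the default KBUILD_CFLAGS.
    CFLAGS_foo.o := -DDEBUG_FOO

With the hunk above applied, no CFLAGS_csum-partial_64.o override remains, so
csum-partial_64.o is built with the default kernel flags and the compiler's own
loop-unrolling heuristics.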