From 2d48b41c8fa610067c4d664ac2339ae6ca43e78c Mon Sep 17 00:00:00 2001
From: Ondrej Bilka
Date: Mon, 20 May 2013 08:20:00 +0200
Subject: Faster memcpy on x64.

We add a new memcpy version that uses unaligned loads, which are fast
on modern processors.  This enables a second improvement: avoiding the
computed jump, which is a relatively expensive operation.

Tests are available here:
http://kam.mff.cuni.cz/~ondra/memcpy_profile_result27_04_13.tar.bz2
---
 sysdeps/x86_64/multiarch/ifunc-impl-list.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 05315fdd7a..28d35793c5 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -227,6 +227,7 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memcpy, HAS_SSSE3,
 			      __memcpy_ssse3_back)
 	      IFUNC_IMPL_ADD (array, i, memcpy, HAS_SSSE3, __memcpy_ssse3)
+	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2))
 
 /* Support sysdeps/x86_64/multiarch/mempcpy_chk.S.  */
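For illustration, here is a minimal C sketch of the unaligned-load
technique the commit message describes.  It is not the actual
__memcpy_sse2_unaligned code; the helper names and the 8-to-16-byte
case are assumptions chosen to keep the example short.  The point is
that one pair of possibly overlapping unaligned accesses covers every
size in the range, so no jump table indexed by the residual byte count
is needed.

#include <stdint.h>
#include <string.h>

/* One unaligned 8-byte load/store; a memcpy of a fixed small size
   compiles down to a single mov on x86-64.  */
static inline uint64_t
load64 (const void *p)
{
  uint64_t v;
  memcpy (&v, p, sizeof v);
  return v;
}

static inline void
store64 (void *p, uint64_t v)
{
  memcpy (p, &v, sizeof v);
}

/* Copy n bytes, 8 <= n <= 16, with no branch on the exact size:
   the two accesses may overlap in the middle, which is harmless.  */
static void
copy_8_to_16 (char *dst, const char *src, size_t n)
{
  uint64_t head = load64 (src);          /* bytes 0 .. 7 */
  uint64_t tail = load64 (src + n - 8);  /* bytes n-8 .. n-1 */
  store64 (dst, head);
  store64 (dst + n - 8, tail);
}

Loading both ends unconditionally replaces the computed jump the
commit message mentions (an indirect branch into one of many
fixed-size copy stubs) with straight-line code.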
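Note that the hunk above only registers __memcpy_sse2_unaligned in the
implementation list that glibc's test machinery iterates over; the
runtime choice between variants is made by an IFUNC resolver.  As a
hedged, self-contained sketch of the general mechanism (the names
below are illustrative, not glibc's; it needs GCC on an ELF platform
such as GNU/Linux):

#include <stddef.h>

static void *
memcpy_byte (void *dst, const void *src, size_t n)
{
  char *d = dst;
  const char *s = src;
  while (n-- > 0)
    *d++ = *s++;
  return dst;
}

/* Stand-in for a faster variant such as an unaligned-load copy.  */
static void *
memcpy_fast (void *dst, const void *src, size_t n)
{
  return memcpy_byte (dst, src, n);
}

/* Runs once, at relocation time; its result becomes my_memcpy.
   A real resolver would test CPUID feature bits here.  */
static void *(*resolve_memcpy (void)) (void *, const void *, size_t)
{
  return memcpy_fast;
}

void *my_memcpy (void *dst, const void *src, size_t n)
     __attribute__ ((ifunc ("resolve_memcpy")));

int
main (void)
{
  char buf[4];
  my_memcpy (buf, "abc", 4);
  return buf[0] != 'a';
}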