From 05f3633da4f9df870d04dd77336e793746e57ed4 Mon Sep 17 00:00:00 2001
From: Ling Ma
Date: Mon, 14 Jul 2014 00:02:52 -0400
Subject: Improve 64bit memcpy performance for Haswell CPU with AVX instruction

In this patch we take advantage of HSW memory bandwidth, reduce branch
mispredictions by avoiding branch instructions, and force the destination
to be aligned with AVX instructions. The CPU2006 403.gcc benchmark
indicates this patch improves performance by 2% to 10%.
---
 sysdeps/x86_64/multiarch/mempcpy_chk.S | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'sysdeps/x86_64/multiarch/mempcpy_chk.S')

diff --git a/sysdeps/x86_64/multiarch/mempcpy_chk.S b/sysdeps/x86_64/multiarch/mempcpy_chk.S
index c28473a669..88e0b74e83 100644
--- a/sysdeps/x86_64/multiarch/mempcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/mempcpy_chk.S
@@ -39,6 +39,9 @@ ENTRY(__mempcpy_chk)
 	testl	$bit_Fast_Copy_Backward, __cpu_features+FEATURE_OFFSET+index_Fast_Copy_Backward(%rip)
 	jz	2f
 	leaq	__mempcpy_chk_ssse3_back(%rip), %rax
+	testl	$bit_AVX_Usable, __cpu_features+FEATURE_OFFSET+index_AVX_Usable(%rip)
+	jz	2f
+	leaq	__mempcpy_chk_avx_unaligned(%rip), %rax
 2:	ret
 END(__mempcpy_chk)
 # else
--
cgit 1.4.1
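
For readers less used to the AT&T assembly above, here is a minimal C sketch
of the selection order the resolver implements after this hunk: each feature
test overwrites the previously chosen implementation, so the AVX variant wins
whenever bit_AVX_Usable is set. The cpu_has_* helpers are hypothetical
stand-ins for the __cpu_features bit tests (they are not glibc interfaces),
and the SSSE3 step comes from earlier in the file, outside the hunk shown; it
is included only to make the sketch self-contained.

/* Hypothetical sketch, not the glibc source: models the leaq/testl/jz
   chain in __mempcpy_chk's resolver as straight-line C.  */
#include <stddef.h>

typedef void *(*mempcpy_chk_fn) (void *, const void *, size_t, size_t);

extern void *__mempcpy_chk_sse2 (void *, const void *, size_t, size_t);
extern void *__mempcpy_chk_ssse3 (void *, const void *, size_t, size_t);
extern void *__mempcpy_chk_ssse3_back (void *, const void *, size_t, size_t);
extern void *__mempcpy_chk_avx_unaligned (void *, const void *, size_t, size_t);

/* Assumed stand-ins for the __cpu_features bit tests.  */
extern int cpu_has_ssse3 (void);               /* bit_SSSE3 */
extern int cpu_has_fast_copy_backward (void);  /* bit_Fast_Copy_Backward */
extern int cpu_has_avx_usable (void);          /* bit_AVX_Usable */

static mempcpy_chk_fn
select_mempcpy_chk (void)
{
  /* Start from the baseline and let each successful feature test
     overwrite the previous choice, mirroring the assembly above.  */
  mempcpy_chk_fn fn = __mempcpy_chk_sse2;
  if (!cpu_has_ssse3 ())
    return fn;
  fn = __mempcpy_chk_ssse3;
  if (!cpu_has_fast_copy_backward ())
    return fn;
  fn = __mempcpy_chk_ssse3_back;
  if (!cpu_has_avx_usable ())
    return fn;
  return __mempcpy_chk_avx_unaligned;
}

Placing the new AVX_Usable test after the Fast_Copy_Backward test keeps the
existing fall-through behaviour intact: machines without usable AVX still get
__mempcpy_chk_ssse3_back, while Haswell-class CPUs are redirected to the
unaligned AVX copy described in the commit message.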