From e41b395523040fcb58c7d378475720c2836d280c Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Mon, 28 Mar 2016 04:39:48 -0700 Subject: [x86] Add a feature bit: Fast_Unaligned_Copy On AMD processors, memcpy optimized with unaligned SSE load is slower than memcpy optimized with aligned SSSE3 while other string functions are faster with unaligned SSE load. A feature bit, Fast_Unaligned_Copy, is added to select memcpy optimized with unaligned SSE load. [BZ #19583] * sysdeps/x86/cpu-features.c (init_cpu_features): Set Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel processors. Set Fast_Copy_Backward for AMD Excavator processors. * sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy): New. (index_arch_Fast_Unaligned_Copy): Likewise. * sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check Fast_Unaligned_Copy instead of Fast_Unaligned_Load. --- sysdeps/x86/cpu-features.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'sysdeps/x86/cpu-features.h') diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h index e06eb7e41b..bfe1f4c68d 100644 --- a/sysdeps/x86/cpu-features.h +++ b/sysdeps/x86/cpu-features.h @@ -35,6 +35,7 @@ #define bit_arch_I686 (1 << 15) #define bit_arch_Prefer_MAP_32BIT_EXEC (1 << 16) #define bit_arch_Prefer_No_VZEROUPPER (1 << 17) +#define bit_arch_Fast_Unaligned_Copy (1 << 18) /* CPUID Feature flags. 
*/ @@ -101,6 +102,7 @@ # define index_arch_I686 FEATURE_INDEX_1*FEATURE_SIZE # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE +# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1*FEATURE_SIZE # if defined (_LIBC) && !IS_IN (nonlib) @@ -265,6 +267,7 @@ extern const struct cpu_features *__get_cpu_features (void) # define index_arch_I686 FEATURE_INDEX_1 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1 +# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1 #endif /* !__ASSEMBLER__ */ -- cgit 1.4.1