author		H.J. Lu <hjl.tools@gmail.com>	2016-03-23 10:33:19 -0700
committer	H.J. Lu <hjl.tools@gmail.com>	2016-03-23 10:56:38 -0700
commit		327aadf6348bd41d1fae46ee7780e214c0a493c1
tree		3a1f3550ee36ea010e53e1ad8f4e1ffc450b5c18
parent		7a25d6a84df9fea56963569ceccaaf7c2a88f161
[x86] Add a feature bit: Fast_Unaligned_Copy
On AMD processors, memcpy optimized with unaligned SSE load is slower
than memcpy optimized with aligned SSSE3, while other string functions
are faster with unaligned SSE load.  A feature bit, Fast_Unaligned_Copy,
is added to select memcpy optimized with unaligned SSE load.

	[BZ #19583]
	* sysdeps/x86/cpu-features.c (init_cpu_features): Set
	Fast_Unaligned_Copy with Fast_Unaligned_Load for Intel
	processors.  Set Fast_Copy_Backward for AMD Excavator
	processors.
	* sysdeps/x86/cpu-features.h (bit_arch_Fast_Unaligned_Copy):
	New.
	(index_arch_Fast_Unaligned_Copy): Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Check
	Fast_Unaligned_Copy instead of Fast_Unaligned_Load.
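As a rough illustration of the mechanism (not glibc's actual code), the
self-contained C sketch below mirrors the bit_arch_*/index_arch_* scheme
this patch extends: each arch feature is one bit inside a word of
cpu_features->feature[], and the matching index_arch_* macro names the
word that holds the bit.  Only the (1 << 18) value is taken from the
patch; the other constants and the helper function are illustrative
stand-ins.

#include <stdio.h>

#define FEATURE_INDEX_1		0
#define FEATURE_INDEX_MAX	1

#define bit_arch_Fast_Unaligned_Load	(1 << 4)   /* illustrative value */
#define bit_arch_Fast_Unaligned_Copy	(1 << 18)  /* value from the patch */
#define index_arch_Fast_Unaligned_Load	FEATURE_INDEX_1
#define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1

struct cpu_features
{
  unsigned int feature[FEATURE_INDEX_MAX];
};

/* Rough analogue of the HAS_ARCH_FEATURE test used in memcpy.S.  */
static int
has_fast_unaligned_copy (const struct cpu_features *cf)
{
  return (cf->feature[index_arch_Fast_Unaligned_Copy]
	  & bit_arch_Fast_Unaligned_Copy) != 0;
}

int
main (void)
{
  struct cpu_features cf = { { 0 } };

  /* On the Intel paths the patch sets both bits together; on AMD the
     Fast_Unaligned_Copy bit stays clear, so __new_memcpy falls through
     to the aligned-SSSE3 variant instead of __memcpy_sse2_unaligned.  */
  cf.feature[index_arch_Fast_Unaligned_Load]
    |= (bit_arch_Fast_Unaligned_Load | bit_arch_Fast_Unaligned_Copy);

  printf ("Fast_Unaligned_Copy: %d\n", has_fast_unaligned_copy (&cf));
  return 0;
}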
-rw-r--r--	sysdeps/x86/cpu-features.c	14
-rw-r--r--	sysdeps/x86/cpu-features.h	3
-rw-r--r--	sysdeps/x86_64/multiarch/memcpy.S	2
3 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index c8f81efd03..de75c79cf9 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -153,8 +153,12 @@ init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 # error index_arch_Fast_Unaligned_Load != index_arch_Slow_SSE4_2
 #endif
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Unaligned_Load]
 		|= (bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop
 		    | bit_arch_Slow_SSE4_2);
 	      break;
@@ -183,10 +187,14 @@ init_cpu_features (struct cpu_features *cpu_features)
 #if index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 # error index_arch_Fast_Rep_String != index_arch_Prefer_PMINUB_for_stringop
 #endif
+#if index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+# error index_arch_Fast_Rep_String != index_arch_Fast_Unaligned_Copy
+#endif
 	      cpu_features->feature[index_arch_Fast_Rep_String]
 		|= (bit_arch_Fast_Rep_String
 		    | bit_arch_Fast_Copy_Backward
 		    | bit_arch_Fast_Unaligned_Load
+		    | bit_arch_Fast_Unaligned_Copy
 		    | bit_arch_Prefer_PMINUB_for_stringop);
 	      break;
 	    }
@@ -220,10 +228,14 @@
 
       if (family == 0x15)
 	{
+#if index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
+# error index_arch_Fast_Unaligned_Load != index_arch_Fast_Copy_Backward
+#endif
 	  /* "Excavator"   */
 	  if (model >= 0x60 && model <= 0x7f)
 	    cpu_features->feature[index_arch_Fast_Unaligned_Load]
-	      |= bit_arch_Fast_Unaligned_Load;
+	      |= (bit_arch_Fast_Unaligned_Load
+		  | bit_arch_Fast_Copy_Backward);
 	}
     }
   else
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index e06eb7e41b..bfe1f4c68d 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -35,6 +35,7 @@
 #define bit_arch_I686			(1 << 15)
 #define bit_arch_Prefer_MAP_32BIT_EXEC	(1 << 16)
 #define bit_arch_Prefer_No_VZEROUPPER	(1 << 17)
+#define bit_arch_Fast_Unaligned_Copy	(1 << 18)
 
 /* CPUID Feature flags.  */
 
@@ -101,6 +102,7 @@
 # define index_arch_I686		FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Fast_Unaligned_Copy FEATURE_INDEX_1*FEATURE_SIZE
 
 # if defined (_LIBC) && !IS_IN (nonlib)
 
@@ -265,6 +267,7 @@ extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_I686		FEATURE_INDEX_1
 # define index_arch_Prefer_MAP_32BIT_EXEC FEATURE_INDEX_1
 # define index_arch_Prefer_No_VZEROUPPER FEATURE_INDEX_1
+# define index_arch_Fast_Unaligned_Copy	FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
 
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index 8882590e51..5b045d7847 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -42,7 +42,7 @@ ENTRY(__new_memcpy)
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jnz	2f
 	lea	__memcpy_sse2_unaligned(%rip), %RAX_LP
-	HAS_ARCH_FEATURE (Fast_Unaligned_Load)
+	HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
 	jnz	2f
 	lea	__memcpy_sse2(%rip), %RAX_LP
 	HAS_CPU_FEATURE (SSSE3)
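A note on the #if/#error guards added above: init_cpu_features ORs
several bit_arch_* values into a single feature[] word in one statement,
which is only correct if every index_arch_* involved selects the same
word.  The guards turn that assumption into a build failure instead of
silently corrupting a neighboring feature word.  Reduced to its
essentials (generic names, not glibc's):

#define INDEX_A 0
#define INDEX_B 0

/* BIT_A and BIT_B may only be combined in one OR if both features
   live in the word selected by the same index.  */
#if INDEX_A != INDEX_B
# error "BIT_A and BIT_B live in different feature words"
#endif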