author		H.J. Lu <hjl.tools@gmail.com>	2021-03-05 07:26:42 -0800
committer	H.J. Lu <hjl.tools@gmail.com>	2021-03-29 07:40:17 -0700
commit		7ebba91361badf7531d4e75050627a88d424872f (patch)
tree		d99781a37b47b95441ad358d119ec3741960d405 /sysdeps/x86_64/multiarch/memchr-avx2.S
parent		91264fe3577fe887b4860923fa6142b5274c8965 (diff)
x86-64: Add AVX optimized string/memory functions for RTM
Since VZEROUPPER triggers RTM abort while VZEROALL won't, select AVX
optimized string/memory functions with

	xtest
	jz	1f
	vzeroall
	ret
1:
	vzeroupper
	ret

at function exit on processors with usable RTM, but without 256-bit EVEX
instructions to avoid VZEROUPPER inside a transactionally executing RTM
region.
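The exit sequence quoted above is captured in a ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST
macro on the sysdep.h side of this series, which this file's diff does not show.
A minimal sketch, assuming those companion definitions (the exact macros in
sysdeps/x86_64/sysdep.h are authoritative):

	/* Sketch of the return macros the new code path relies on.  Names
	   and shape follow the sysdep.h changes from the same series.  */
	#define ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST \
		xtest;		/* ZF = 1 when not executing transactionally.  */ \
		jz	1f;	/* Outside an RTM region: VZEROUPPER is safe.  */ \
		vzeroall;	/* Inside an RTM region: VZEROALL does not abort.  */ \
		ret;		\
	1:			\
		vzeroupper;	\
		ret

	/* Default (non-RTM) flavor keeps the old VZEROUPPER + ret exit.  */
	#ifndef ZERO_UPPER_VEC_REGISTERS_RETURN
	# define ZERO_UPPER_VEC_REGISTERS_RETURN \
		VZEROUPPER; \
		ret
	#endif

	#ifndef VZEROUPPER_RETURN
	# define VZEROUPPER_RETURN	VZEROUPPER; ret
	#endif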
Diffstat (limited to 'sysdeps/x86_64/multiarch/memchr-avx2.S')
-rw-r--r--	sysdeps/x86_64/multiarch/memchr-avx2.S	| 45
1 file changed, 21 insertions(+), 24 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memchr-avx2.S b/sysdeps/x86_64/multiarch/memchr-avx2.S
index 77a9523168..1fcb1c350f 100644
--- a/sysdeps/x86_64/multiarch/memchr-avx2.S
+++ b/sysdeps/x86_64/multiarch/memchr-avx2.S
@@ -34,9 +34,13 @@
 #  define VZEROUPPER	vzeroupper
 # endif
 
+# ifndef SECTION
+#  define SECTION(p)	p##.avx
+# endif
+
 # define VEC_SIZE 32
 
-	.section .text.avx,"ax",@progbits
+	.section SECTION(.text),"ax",@progbits
 ENTRY (MEMCHR)
 # ifndef USE_AS_RAWMEMCHR
 	/* Check for zero length.  */
@@ -107,8 +111,8 @@ L(cros_page_boundary):
 # endif
 	addq	%rdi, %rax
 	addq	%rcx, %rax
-	VZEROUPPER
-	ret
+L(return_vzeroupper):
+	ZERO_UPPER_VEC_REGISTERS_RETURN
 
 	.p2align 4
 L(aligned_more):
@@ -224,8 +228,7 @@ L(last_4x_vec_or_less):
 	jnz	L(first_vec_x3_check)
 	xorl	%eax, %eax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(last_2x_vec):
@@ -243,8 +246,7 @@ L(last_2x_vec):
 	testl	%eax, %eax
 	jnz	L(first_vec_x1_check)
 	xorl	%eax, %eax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(first_vec_x0_check):
@@ -253,8 +255,7 @@ L(first_vec_x0_check):
 	cmpq	%rax, %rdx
 	jbe	L(zero)
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(first_vec_x1_check):
@@ -264,8 +265,7 @@ L(first_vec_x1_check):
 	cmpq	%rax, %rdx
 	jbe	L(zero)
 	addq	$VEC_SIZE, %rax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(first_vec_x2_check):
@@ -275,8 +275,7 @@ L(first_vec_x2_check):
 	cmpq	%rax, %rdx
 	jbe	L(zero)
 	addq	$(VEC_SIZE * 2), %rax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(first_vec_x3_check):
@@ -286,12 +285,14 @@ L(first_vec_x3_check):
 	cmpq	%rax, %rdx
 	jbe	L(zero)
 	addq	$(VEC_SIZE * 3), %rax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(zero):
-	VZEROUPPER
+	xorl	%eax, %eax
+	jmp	L(return_vzeroupper)
+
+	.p2align 4
 L(null):
 	xorl	%eax, %eax
 	ret
@@ -301,24 +302,21 @@ L(null):
 L(first_vec_x0):
 	tzcntl	%eax, %eax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(first_vec_x1):
 	tzcntl	%eax, %eax
 	addq	$VEC_SIZE, %rax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(first_vec_x2):
 	tzcntl	%eax, %eax
 	addq	$(VEC_SIZE * 2), %rax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 
 	.p2align 4
 L(4x_vec_end):
@@ -337,8 +335,7 @@ L(first_vec_x3):
 	tzcntl	%eax, %eax
 	addq	$(VEC_SIZE * 3), %rax
 	addq	%rdi, %rax
-	VZEROUPPER
-	ret
+	VZEROUPPER_RETURN
 END (MEMCHR)
 #endif
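The new SECTION(p) hook and the VZEROUPPER_RETURN / L(return_vzeroupper)
funnel exist so that a thin wrapper file can rebuild this source as an
RTM-safe variant. A hedged sketch of such a wrapper, modeled on the
memchr-avx2-rtm.S added elsewhere in this series (that file is not part of
this diffstat, so details may differ):

	/* Build __memchr_avx2_rtm from the generic AVX2 source by
	   overriding the hooks introduced above.  */
	#ifndef MEMCHR
	# define MEMCHR	__memchr_avx2_rtm
	#endif

	/* Exit through the xtest/vzeroall/vzeroupper sequence.  */
	#define ZERO_UPPER_VEC_REGISTERS_RETURN \
	  ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST

	/* Route every early return through the single exit point.  */
	#define VZEROUPPER_RETURN	jmp L(return_vzeroupper)

	/* Keep the RTM variant in its own text section.  */
	#define SECTION(p)	p##.avx.rtm

	#include "memchr-avx2.S"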