author | H.J. Lu <hjl.tools@gmail.com> | 2017-01-30 10:59:15 -0800
---|---|---
committer | H.J. Lu <hjl.tools@gmail.com> | 2017-02-24 09:46:15 -0800
commit | 4d393a8831fc0ab1b7c29e2d5bafbdbfe2102082 |
tree | 61dd7efdad17adb9047e1f6eaf6fe646d20b5d16 |
parent | d012ea850680a2a94959f1c5136502a0f712b30a |
Add VZEROUPPER to memset-vec-unaligned-erms.S [BZ #21081]
Since memset-vec-unaligned-erms.S has VDUP_TO_VEC0_AND_SET_RETURN at function entry, memset optimized for AVX2 and AVX512 will always use ymm/zmm registers. VZEROUPPER should be placed before ret in L(stosb):

	movq	%rdx, %rcx
	movzbl	%sil, %eax
	movq	%rdi, %rdx
	rep stosb
	movq	%rdx, %rax
	ret

since it can be reached from L(stosb_more_2x_vec):

	cmpq	$REP_STOSB_THRESHOLD, %rdx
	ja	L(stosb)

	[BZ #21081]
	* sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
	(L(stosb)): Add VZEROUPPER before ret.

(cherry picked from commit 02b78ff749f0c88771713368dbb2a09b1979814f)
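For context, here is a minimal standalone sketch of the pattern being fixed, in GNU as AT&T syntax. This is not glibc's actual code: the function name, the local label, and the literal 2048 threshold are hypothetical stand-ins (the real implementation uses the REP_STOSB_THRESHOLD macro and has full vector store paths). It illustrates that the AVX2 broadcast at entry dirties the upper half of %ymm0, so vzeroupper must execute before the legacy rep stosb sequence and its ret:

	.text
	.globl	memset_avx2_sketch
	.type	memset_avx2_sketch, @function
/* void *memset_avx2_sketch (void *dest, int c, size_t n);
   dest in %rdi, c in %esi, n in %rdx (SysV x86-64 ABI).  */
memset_avx2_sketch:
	vmovd	%esi, %xmm0
	vpbroadcastb %xmm0, %ymm0	/* Dirties the upper 128 bits of %ymm0.  */
	cmpq	$2048, %rdx		/* Hypothetical rep-stosb threshold.  */
	ja	.Lstosb
	/* Vector store path elided in this sketch; fall through.  */
.Lstosb:
	vzeroupper			/* The fix: clear dirty upper YMM state
					   before the legacy instructions below
					   and the return to the caller.  */
	movq	%rdx, %rcx		/* rep stosb takes the count in %rcx,  */
	movzbl	%sil, %eax		/* the fill byte in %al,  */
	movq	%rdi, %rdx		/* and advances %rdi, so save dest.  */
	rep stosb
	movq	%rdx, %rax		/* memset returns dest.  */
	ret
	.size	memset_avx2_sketch, .-memset_avx2_sketch

Without the vzeroupper, a caller that executes legacy SSE instructions after this function returns pays the AVX-to-SSE transition penalty on Intel CPUs that track dirty upper YMM state (e.g. Sandy Bridge through Broadwell), which is what BZ #21081 reports for the rep stosb path.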
-rw-r--r-- | ChangeLog | 6
-rw-r--r-- | sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S | 2
2 files changed, 8 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
index a9b7540017..1b7d40adc0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,9 @@
+2017-01-30  H.J. Lu  <hongjiu.lu@intel.com>
+
+	[BZ #21081]
+	* sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+	(L(stosb)): Add VZEROUPPER before ret.
+
 2016-11-28  H.J. Lu  <hongjiu.lu@intel.com>
 
 	[BZ #20750]
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 28e71fd576..acf448c9a6 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -110,6 +110,8 @@ ENTRY (__memset_erms)
 ENTRY (MEMSET_SYMBOL (__memset, erms))
 # endif
 L(stosb):
+	/* Issue vzeroupper before rep stosb.  */
+	VZEROUPPER
 	movq	%rdx, %rcx
 	movzbl	%sil, %eax
 	movq	%rdi, %rdx