author     H.J. Lu <hjl.tools@gmail.com>  2016-07-01 05:54:43 -0700
committer  H.J. Lu <hjl.tools@gmail.com>  2016-07-01 06:03:05 -0700
commit     f43cb35c9b3c35addc6dc0f1427caf51786ca1d2 (patch)
tree       a2c943de50b515cca9d14ea8e26d6c76152de139 /sysdeps/x86_64/multiarch
parent     ee2196bb6766ca7e63a1ba22ebb7619a3266776a (diff)
Require binutils 2.24 to build x86-64 glibc [BZ #20139]
If the assembler doesn't support AVX512DQ, _dl_runtime_resolve_avx is
used to save the first 8 vector registers for lazy binding, but it
saves only the lower 256 bits of each vector register.  When it is
called on an AVX512 platform, the upper 256 bits of the ZMM registers
are clobbered, so parameters passed in ZMM registers are wrong the
first time the function is called.  This patch requires binutils 2.24,
whose assembler can store and load ZMM registers, to build x86-64
glibc.  Since the mathvec library needs assembler support for
AVX512DQ, mathvec is disabled if the assembler doesn't support
AVX512DQ.
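
As a hedged illustration (not glibc's own test; lib_identity is a
hypothetical function assumed to live in a lazily-bound shared
library), the failure mode looks like this when both objects are
built with -mavx512f and run on an AVX512 machine:

/* Sketch of the bug: the first call to lib_identity goes through the
   PLT into _dl_runtime_resolve.  If the resolver saves and restores
   only the lower 256 bits (ymm) of each argument register, the upper
   256 bits of the __m512i argument are clobbered on that first call.  */
#include <immintrin.h>
#include <stdio.h>

extern __m512i lib_identity (__m512i x); /* hypothetical; returns x */

int
main (void)
{
  __m512i in = _mm512_set1_epi64 (0x0123456789abcdefLL);
  __m512i out = lib_identity (in);       /* first call: lazy binding */
  __mmask8 eq = _mm512_cmpeq_epi64_mask (in, out);
  puts (eq == 0xff ? "PASS" : "FAIL: ZMM upper bits clobbered");
  return eq != 0xff;
}

A second call would pass, since symbol resolution happens only once;
that is why the breakage appears only the first time a function is
called.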

	[BZ #20139]
	* config.h.in (HAVE_AVX512_ASM_SUPPORT): Renamed to ...
	(HAVE_AVX512DQ_ASM_SUPPORT): This.
	* sysdeps/x86_64/configure.ac: Require assembler from binutils
	2.24 or above.
	(HAVE_AVX512_ASM_SUPPORT): Removed.
	(HAVE_AVX512DQ_ASM_SUPPORT): New.
	* sysdeps/x86_64/configure: Regenerated.
	* sysdeps/x86_64/dl-trampoline.S: Make HAVE_AVX512_ASM_SUPPORT
	check unconditional.
	* sysdeps/x86_64/multiarch/ifunc-impl-list.c: Likewise.
	* sysdeps/x86_64/multiarch/memcpy.S: Likewise.
	* sysdeps/x86_64/multiarch/memcpy_chk.S: Likewise.
	* sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memmove.S: Likewise.
	* sysdeps/x86_64/multiarch/memmove_chk.S: Likewise.
	* sysdeps/x86_64/multiarch/mempcpy.S: Likewise.
	* sysdeps/x86_64/multiarch/mempcpy_chk.S: Likewise.
	* sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S:
	Likewise.
	* sysdeps/x86_64/multiarch/memset.S: Likewise.
	* sysdeps/x86_64/multiarch/memset_chk.S: Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S: Check
	HAVE_AVX512DQ_ASM_SUPPORT instead of HAVE_AVX512_ASM_SUPPORT.
	* sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S:
	Likewise.
	* sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S:
	Likewise.
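
The configure change named above amounts to asking whether the
assembler accepts an AVX512DQ instruction: vandpd on a full ZMM
register is encodable only with AVX512DQ, not plain AVX512F.  A
compile-only sketch in the same spirit (an approximation, not the
verbatim configure.ac fragment):

/* This translation unit assembles only if the assembler knows
   AVX512DQ.  Basic asm with no operands, so the '%' register
   prefixes pass through to the assembler verbatim.  Probe only;
   not meant to be executed.  */
int
probe_avx512dq (void)
{
  __asm__ ("vandpd (%rax), %zmm6, %zmm1");
  return 0;
}

If this fails to assemble, configure would leave
HAVE_AVX512DQ_ASM_SUPPORT undefined and mathvec would be disabled.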
Diffstat (limited to 'sysdeps/x86_64/multiarch')
-rw-r--r--  sysdeps/x86_64/multiarch/ifunc-impl-list.c                | 16
-rw-r--r--  sysdeps/x86_64/multiarch/memcpy.S                         |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memcpy_chk.S                     |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S   |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S  |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memmove.S                        |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memmove_chk.S                    |  2
-rw-r--r--  sysdeps/x86_64/multiarch/mempcpy.S                        |  2
-rw-r--r--  sysdeps/x86_64/multiarch/mempcpy_chk.S                    |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S    |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S   |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memset.S                         |  2
-rw-r--r--  sysdeps/x86_64/multiarch/memset_chk.S                     |  2
13 files changed, 4 insertions(+), 36 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 449b04647e..a443e41d2f 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -48,7 +48,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/memmove_chk.c.  */
   IFUNC_IMPL (i, name, __memmove_chk,
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memmove_chk_avx512_no_vzeroupper)
@@ -58,7 +57,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memmove_chk_avx512_unaligned_erms)
-#endif
 	      IFUNC_IMPL_ADD (array, i, __memmove_chk,
 			      HAS_ARCH_FEATURE (AVX_Usable),
 			      __memmove_chk_avx_unaligned)
@@ -84,7 +82,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memmove,
 			      HAS_ARCH_FEATURE (AVX_Usable),
 			      __memmove_avx_unaligned_erms)
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, memmove,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memmove_avx512_no_vzeroupper)
@@ -94,7 +91,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memmove,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memmove_avx512_unaligned_erms)
-#endif
 	      IFUNC_IMPL_ADD (array, i, memmove, HAS_CPU_FEATURE (SSSE3),
 			      __memmove_ssse3_back)
 	      IFUNC_IMPL_ADD (array, i, memmove, HAS_CPU_FEATURE (SSSE3),
@@ -117,7 +113,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
 			      HAS_ARCH_FEATURE (AVX2_Usable),
 			      __memset_chk_avx2_unaligned_erms)
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memset_chk_avx512_unaligned_erms)
@@ -127,7 +122,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __memset_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memset_chk_avx512_no_vzeroupper)
-#endif
 	      )
 
   /* Support sysdeps/x86_64/multiarch/memset.S.  */
@@ -143,7 +137,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memset,
 			      HAS_ARCH_FEATURE (AVX2_Usable),
 			      __memset_avx2_unaligned_erms)
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, memset,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memset_avx512_unaligned_erms)
@@ -153,7 +146,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memset,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memset_avx512_no_vzeroupper)
-#endif
 	     )
 
   /* Support sysdeps/x86_64/multiarch/stpncpy.S.  */
@@ -311,7 +303,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.S.  */
   IFUNC_IMPL (i, name, __memcpy_chk,
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memcpy_chk_avx512_no_vzeroupper)
@@ -321,7 +312,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memcpy_chk_avx512_unaligned_erms)
-#endif
 	      IFUNC_IMPL_ADD (array, i, __memcpy_chk,
 			      HAS_ARCH_FEATURE (AVX_Usable),
 			      __memcpy_chk_avx_unaligned)
@@ -351,7 +341,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 			      __memcpy_ssse3_back)
 	      IFUNC_IMPL_ADD (array, i, memcpy, HAS_CPU_FEATURE (SSSE3),
 			      __memcpy_ssse3)
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, memcpy,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memcpy_avx512_no_vzeroupper)
@@ -361,7 +350,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memcpy,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __memcpy_avx512_unaligned_erms)
-#endif
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2_unaligned)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1,
 			      __memcpy_sse2_unaligned_erms)
@@ -369,7 +357,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/mempcpy_chk.S.  */
   IFUNC_IMPL (i, name, __mempcpy_chk,
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __mempcpy_chk_avx512_no_vzeroupper)
@@ -379,7 +366,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __mempcpy_chk_avx512_unaligned_erms)
-#endif
 	      IFUNC_IMPL_ADD (array, i, __mempcpy_chk,
 			      HAS_ARCH_FEATURE (AVX_Usable),
 			      __mempcpy_chk_avx_unaligned)
@@ -399,7 +385,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/mempcpy.S.  */
   IFUNC_IMPL (i, name, mempcpy,
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __mempcpy_avx512_no_vzeroupper)
@@ -409,7 +394,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
 			      HAS_ARCH_FEATURE (AVX512F_Usable),
 			      __mempcpy_avx512_unaligned_erms)
-#endif
 	      IFUNC_IMPL_ADD (array, i, mempcpy,
 			      HAS_ARCH_FEATURE (AVX_Usable),
 			      __mempcpy_avx_unaligned)
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index df7fbacd8a..b8677596f9 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -32,7 +32,6 @@ ENTRY(__new_memcpy)
 	lea	__memcpy_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memcpy_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -43,7 +42,6 @@ ENTRY(__new_memcpy)
 	jnz	2f
 	lea	__memcpy_avx512_unaligned(%rip), %RAX_LP
 	ret
-# endif
 1:	lea	__memcpy_avx_unaligned(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jz	L(Fast_Unaligned_Load)
diff --git a/sysdeps/x86_64/multiarch/memcpy_chk.S b/sysdeps/x86_64/multiarch/memcpy_chk.S
index 11f13104c2..9d92c8a7e3 100644
--- a/sysdeps/x86_64/multiarch/memcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/memcpy_chk.S
@@ -30,7 +30,6 @@
 ENTRY(__memcpy_chk)
 	.type	__memcpy_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -41,7 +40,6 @@ ENTRY(__memcpy_chk)
 	jnz	2f
 	lea	__memcpy_chk_avx512_unaligned(%rip), %RAX_LP
 	ret
-# endif
 1:	lea	__memcpy_chk_avx_unaligned(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jz	L(Fast_Unaligned_Load)
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
index 5b8ff57ed5..664b74de49 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
@@ -18,7 +18,7 @@
 
 #include <sysdep.h>
 
-#if defined HAVE_AVX512_ASM_SUPPORT && IS_IN (libc)
+#if IS_IN (libc)
 
 # include "asm-syntax.h"
 
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
index f9af6fdce6..aac1515cf6 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
@@ -1,4 +1,4 @@
-#if defined HAVE_AVX512_ASM_SUPPORT && IS_IN (libc)
+#if IS_IN (libc)
 # define VEC_SIZE	64
 # define VEC(i)		zmm##i
 # define VMOVNT		vmovntdq
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 8e1c6ac8e8..ff5e041420 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -30,7 +30,6 @@ ENTRY(__libc_memmove)
 	lea	__memmove_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memmove_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -41,7 +40,6 @@ ENTRY(__libc_memmove)
 	jnz	2f
 	lea	__memmove_avx512_unaligned(%rip), %RAX_LP
 	ret
-# endif
 1:	lea	__memmove_avx_unaligned(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jz	L(Fast_Unaligned_Load)
diff --git a/sysdeps/x86_64/multiarch/memmove_chk.S b/sysdeps/x86_64/multiarch/memmove_chk.S
index cd639b8862..7f861206df 100644
--- a/sysdeps/x86_64/multiarch/memmove_chk.S
+++ b/sysdeps/x86_64/multiarch/memmove_chk.S
@@ -29,7 +29,6 @@
 ENTRY(__memmove_chk)
 	.type	__memmove_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memmove_chk_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -40,7 +39,6 @@ ENTRY(__memmove_chk)
 	jnz	2f
 	lea	__memmove_chk_avx512_unaligned(%rip), %RAX_LP
 	ret
-# endif
 1:	lea	__memmove_chk_avx_unaligned(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jz	L(Fast_Unaligned_Load)
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index 4011a1a4f0..51970687cf 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -32,7 +32,6 @@ ENTRY(__mempcpy)
 	lea	__mempcpy_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__mempcpy_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -43,7 +42,6 @@ ENTRY(__mempcpy)
 	jnz	2f
 	lea	__mempcpy_avx512_unaligned(%rip), %RAX_LP
 	ret
-# endif
 1:	lea	__mempcpy_avx_unaligned(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jz	L(Fast_Unaligned_Load)
diff --git a/sysdeps/x86_64/multiarch/mempcpy_chk.S b/sysdeps/x86_64/multiarch/mempcpy_chk.S
index 80f460fd01..9e49f6f26e 100644
--- a/sysdeps/x86_64/multiarch/mempcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/mempcpy_chk.S
@@ -30,7 +30,6 @@
 ENTRY(__mempcpy_chk)
 	.type	__mempcpy_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__mempcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -41,7 +40,6 @@ ENTRY(__mempcpy_chk)
 	jnz	2f
 	lea	__mempcpy_chk_avx512_unaligned(%rip), %RAX_LP
 	ret
-# endif
 1:	lea	__mempcpy_chk_avx_unaligned(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
 	jz	L(Fast_Unaligned_Load)
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
index eab8c5a651..9687df0d3e 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
@@ -18,7 +18,7 @@
 
 #include <sysdep.h>
 
-#if defined HAVE_AVX512_ASM_SUPPORT && IS_IN (libc)
+#if IS_IN (libc)
 
 #include "asm-syntax.h"
 #ifndef MEMSET
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index f1b3cb23d3..a5ec349198 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -1,4 +1,4 @@
-#if defined HAVE_AVX512_ASM_SUPPORT && IS_IN (libc)
+#if IS_IN (libc)
 # define VEC_SIZE	64
 # define VEC(i)		zmm##i
 # define VMOVU		vmovdqu64
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index 2b964a0398..96e99341aa 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -41,7 +41,6 @@ ENTRY(memset)
 	jnz	L(AVX512F)
 	lea	__memset_avx2_unaligned(%rip), %RAX_LP
 L(AVX512F):
-# ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	2f
 	lea	__memset_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -51,7 +50,6 @@ L(AVX512F):
 	HAS_CPU_FEATURE (ERMS)
 	jnz	2f
 	lea	__memset_avx512_unaligned(%rip), %RAX_LP
-# endif
 2:	ret
 END(memset)
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset_chk.S b/sysdeps/x86_64/multiarch/memset_chk.S
index 8517cfc073..2efe6ed909 100644
--- a/sysdeps/x86_64/multiarch/memset_chk.S
+++ b/sysdeps/x86_64/multiarch/memset_chk.S
@@ -38,7 +38,6 @@ ENTRY(__memset_chk)
 	jnz	L(AVX512F)
 	lea	__memset_chk_avx2_unaligned(%rip), %RAX_LP
 L(AVX512F):
-#ifdef HAVE_AVX512_ASM_SUPPORT
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	2f
 	lea	__memset_chk_avx512_no_vzeroupper(%rip), %RAX_LP
@@ -48,7 +47,6 @@ L(AVX512F):
 	HAS_CPU_FEATURE (ERMS)
 	jnz	2f
 	lea	__memset_chk_avx512_unaligned(%rip), %RAX_LP
-#endif
 2:	ret
 END(__memset_chk)